// Copyright 2022 Luca Casonato. All rights reserved. MIT license.

/**
 * Vertex AI API Client for Deno
 * =============================
 *
 * Train high-quality custom machine learning models with minimal machine
 * learning expertise and effort.
 *
 * Docs: https://cloud.google.com/vertex-ai/
 * Source: https://googleapis.deno.dev/v1/aiplatform:v1.ts
 */

import { auth, CredentialsClient, GoogleAuth, request } from "/_/base@v1/mod.ts";
export { auth, GoogleAuth };
export type { CredentialsClient };

/**
 * Train high-quality custom machine learning models with minimal machine
 * learning expertise and effort.
 */
export class AIplatform {
  #client: CredentialsClient | undefined;
  #baseUrl: string;

  constructor(client?: CredentialsClient, baseUrl: string = "https://aiplatform.googleapis.com/") {
    this.#client = client;
    this.#baseUrl = baseUrl;
  }

  /**
   * Creates a Dataset.
   */
  async datasetsCreate(req: GoogleCloudAiplatformV1Dataset, opts: DatasetsCreateOptions = {}): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/datasets`);
    if (opts.parent !== undefined) { url.searchParams.append("parent", String(opts.parent)); }
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Create a version from a Dataset.
   *
   * @param parent Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async datasetsDatasetVersionsCreate(parent: string, req: GoogleCloudAiplatformV1DatasetVersion): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Deletes a Dataset version.
   *
   * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`
   */
  async datasetsDatasetVersionsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a Dataset version.
   *
   * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`
   */
  async datasetsDatasetVersionsGet(name: string, opts: DatasetsDatasetVersionsGetOptions = {}): Promise<GoogleCloudAiplatformV1DatasetVersion> {
    opts = serializeDatasetsDatasetVersionsGetOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1DatasetVersion;
  }
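  // Usage sketch (illustrative only, not part of the generated surface):
  // constructing the client and creating a Dataset. `credentials` stands in for a
  // CredentialsClient obtained via the auth helpers re-exported above, and the
  // request fields shown (`displayName`, `metadataSchemaUri`) are assumptions
  // about GoogleCloudAiplatformV1Dataset as defined further down in this module.
  //
  //   const ai = new AIplatform(credentials);
  //   const op = await ai.datasetsCreate(
  //     {
  //       displayName: "my-dataset",
  //       metadataSchemaUri: "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml",
  //     },
  //     { parent: "projects/my-project/locations/us-central1" },
  //   );
  //   console.log(op.name); // long-running operation; poll or wait on it as shown later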
  /**
   * Lists DatasetVersions in a Dataset.
   *
   * @param parent Required. The resource name of the Dataset to list DatasetVersions from. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async datasetsDatasetVersionsList(parent: string, opts: DatasetsDatasetVersionsListOptions = {}): Promise<GoogleCloudAiplatformV1ListDatasetVersionsResponse> {
    opts = serializeDatasetsDatasetVersionsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListDatasetVersionsResponse;
  }

  /**
   * Updates a DatasetVersion.
   *
   * @param name Output only. Identifier. The resource name of the DatasetVersion.
   */
  async datasetsDatasetVersionsPatch(name: string, req: GoogleCloudAiplatformV1DatasetVersion, opts: DatasetsDatasetVersionsPatchOptions = {}): Promise<GoogleCloudAiplatformV1DatasetVersion> {
    opts = serializeDatasetsDatasetVersionsPatchOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); }
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "PATCH", body });
    return data as GoogleCloudAiplatformV1DatasetVersion;
  }

  /**
   * Restores a dataset version.
   *
   * @param name Required. The name of the DatasetVersion resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`
   */
  async datasetsDatasetVersionsRestore(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:restore`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Deletes a Dataset.
   *
   * @param name Required. The resource name of the Dataset to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async datasetsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a Dataset.
   *
   * @param name Required. The name of the Dataset resource.
   */
  async datasetsGet(name: string, opts: DatasetsGetOptions = {}): Promise<GoogleCloudAiplatformV1Dataset> {
    opts = serializeDatasetsGetOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1Dataset;
  }
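  // Paging sketch: the list-style methods on this client use pageToken-based
  // pagination. The `datasetVersions` and `nextPageToken` field names below are
  // assumptions about GoogleCloudAiplatformV1ListDatasetVersionsResponse.
  //
  //   let pageToken: string | undefined = undefined;
  //   do {
  //     const page = await ai.datasetsDatasetVersionsList(
  //       "projects/my-project/locations/us-central1/datasets/123",
  //       { pageSize: 50, pageToken },
  //     );
  //     for (const version of page.datasetVersions ?? []) console.log(version.name);
  //     pageToken = page.nextPageToken;
  //   } while (pageToken);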
  /**
   * Lists Datasets in a Location.
   */
  async datasetsList(opts: DatasetsListOptions = {}): Promise<GoogleCloudAiplatformV1ListDatasetsResponse> {
    opts = serializeDatasetsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/datasets`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.parent !== undefined) { url.searchParams.append("parent", String(opts.parent)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListDatasetsResponse;
  }

  /**
   * Updates a Dataset.
   *
   * @param name Output only. Identifier. The resource name of the Dataset.
   */
  async datasetsPatch(name: string, req: GoogleCloudAiplatformV1Dataset, opts: DatasetsPatchOptions = {}): Promise<GoogleCloudAiplatformV1Dataset> {
    opts = serializeDatasetsPatchOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); }
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "PATCH", body });
    return data as GoogleCloudAiplatformV1Dataset;
  }

  /**
   * Return a list of tokens based on the input text.
   *
   * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids.
   */
  async endpointsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise<GoogleCloudAiplatformV1ComputeTokensResponse> {
    req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data);
  }

  /**
   * Performs token counting.
   *
   * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`
   */
  async endpointsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise<GoogleCloudAiplatformV1CountTokensResponse> {
    req = serializeGoogleCloudAiplatformV1CountTokensRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleCloudAiplatformV1CountTokensResponse;
  }

  /**
   * Generate content with multimodal inputs.
   *
   * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*\/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}`
   */
  async endpointsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise<GoogleCloudAiplatformV1GenerateContentResponse> {
    req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleCloudAiplatformV1GenerateContentResponse;
  }
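  // Generation sketch: calling a publisher model through this client. The request
  // shape (`contents`, `role`, `parts`) and the response's `candidates` field are
  // assumptions about the GenerateContent types defined later in this module, and
  // the model path is only an example value.
  //
  //   const res = await ai.endpointsGenerateContent(
  //     "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.0-pro",
  //     { contents: [{ role: "user", parts: [{ text: "Say hello." }] }] },
  //   );
  //   console.log(res.candidates?.[0]?.content?.parts?.[0]?.text);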
  /**
   * Generate content with multimodal inputs with streaming support.
   *
   * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*\/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}`
   */
  async endpointsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise<GoogleCloudAiplatformV1GenerateContentResponse> {
    req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleCloudAiplatformV1GenerateContentResponse;
  }

  /**
   * Upload a file into a RagCorpus.
   *
   * @param parent Required. The name of the RagCorpus resource into which to upload the file. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`
   */
  async mediaUpload(parent: string, req: GoogleCloudAiplatformV1UploadRagFileRequest): Promise<GoogleCloudAiplatformV1UploadRagFileResponse> {
    req = serializeGoogleCloudAiplatformV1UploadRagFileRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/ragFiles:upload`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1UploadRagFileResponse(data);
  }

  /**
   * Gets a GenAI cache config.
   *
   * @param name Required. Name of the cache config. Format: - `projects/{project}/cacheConfig`.
   */
  async projectsGetCacheConfig(name: string): Promise<GoogleCloudAiplatformV1CacheConfig> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1CacheConfig;
  }

  /**
   * Given an input prompt, returns an augmented prompt from the Vertex RAG
   * store to guide the LLM towards generating grounded responses.
   *
   * @param parent Required. The resource name of the Location from which to augment prompt. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`.
   */
  async projectsLocationsAugmentPrompt(parent: string, req: GoogleCloudAiplatformV1AugmentPromptRequest): Promise<GoogleCloudAiplatformV1AugmentPromptResponse> {
    req = serializeGoogleCloudAiplatformV1AugmentPromptRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }:augmentPrompt`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1AugmentPromptResponse(data);
  }

  /**
   * Cancels a BatchPredictionJob. Starts asynchronous cancellation on the
   * BatchPredictionJob. The server makes the best effort to cancel the job, but
   * success is not guaranteed. Clients can use JobService.GetBatchPredictionJob
   * or other methods to check whether the cancellation succeeded or whether the
   * job completed despite cancellation. On a successful cancellation, the
   * BatchPredictionJob is not deleted; instead its BatchPredictionJob.state is
   * set to `CANCELLED`. Any files already outputted by the job are not deleted.
   *
   * @param name Required. The name of the BatchPredictionJob to cancel. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`
   */
  async projectsLocationsBatchPredictionJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelBatchPredictionJobRequest): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Creates a BatchPredictionJob. A BatchPredictionJob, once created, will
   * immediately be attempted to start.
   *
   * @param parent Required. The resource name of the Location to create the BatchPredictionJob in. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsBatchPredictionJobsCreate(parent: string, req: GoogleCloudAiplatformV1BatchPredictionJob): Promise<GoogleCloudAiplatformV1BatchPredictionJob> {
    req = serializeGoogleCloudAiplatformV1BatchPredictionJob(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/batchPredictionJobs`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1BatchPredictionJob(data);
  }

  /**
   * Deletes a BatchPredictionJob. Can only be called on jobs that already
   * finished.
   *
   * @param name Required. The name of the BatchPredictionJob resource to be deleted. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`
   */
  async projectsLocationsBatchPredictionJobsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a BatchPredictionJob.
   *
   * @param name Required. The name of the BatchPredictionJob resource. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`
   */
  async projectsLocationsBatchPredictionJobsGet(name: string): Promise<GoogleCloudAiplatformV1BatchPredictionJob> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1BatchPredictionJob(data);
  }

  /**
   * Lists BatchPredictionJobs in a Location.
   *
   * @param parent Required. The resource name of the Location to list the BatchPredictionJobs from. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsBatchPredictionJobsList(parent: string, opts: ProjectsLocationsBatchPredictionJobsListOptions = {}): Promise<GoogleCloudAiplatformV1ListBatchPredictionJobsResponse> {
    opts = serializeProjectsLocationsBatchPredictionJobsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/batchPredictionJobs`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data);
  }
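  // Cancellation sketch: cancelling a batch prediction job and then reading it
  // back, since cancellation is asynchronous and not guaranteed. The `state`
  // field and its values are assumptions about GoogleCloudAiplatformV1BatchPredictionJob.
  //
  //   const job = "projects/my-project/locations/us-central1/batchPredictionJobs/456";
  //   await ai.projectsLocationsBatchPredictionJobsCancel(job, {});
  //   const current = await ai.projectsLocationsBatchPredictionJobsGet(job);
  //   console.log(current.state); // e.g. "JOB_STATE_CANCELLING" or "JOB_STATE_CANCELLED"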
  /**
   * Creates cached content. This call will initialize the cached content in
   * the data storage, and users need to pay for the cache data storage.
   *
   * @param parent Required. The parent resource where the cached content will be created
   */
  async projectsLocationsCachedContentsCreate(parent: string, req: GoogleCloudAiplatformV1CachedContent): Promise<GoogleCloudAiplatformV1CachedContent> {
    req = serializeGoogleCloudAiplatformV1CachedContent(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/cachedContents`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1CachedContent(data);
  }

  /**
   * Deletes cached content.
   *
   * @param name Required. The resource name referring to the cached content
   */
  async projectsLocationsCachedContentsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets cached content configurations.
   *
   * @param name Required. The resource name referring to the cached content
   */
  async projectsLocationsCachedContentsGet(name: string): Promise<GoogleCloudAiplatformV1CachedContent> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1CachedContent(data);
  }

  /**
   * Lists cached contents in a project.
   *
   * @param parent Required. The parent, which owns this collection of cached contents.
   */
  async projectsLocationsCachedContentsList(parent: string, opts: ProjectsLocationsCachedContentsListOptions = {}): Promise<GoogleCloudAiplatformV1ListCachedContentsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ parent }/cachedContents`);
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1ListCachedContentsResponse(data);
  }

  /**
   * Updates cached content configurations.
   *
   * @param name Immutable. Identifier. The server-generated resource name of the cached content. Format: projects/{project}/locations/{location}/cachedContents/{cached_content}
   */
  async projectsLocationsCachedContentsPatch(name: string, req: GoogleCloudAiplatformV1CachedContent, opts: ProjectsLocationsCachedContentsPatchOptions = {}): Promise<GoogleCloudAiplatformV1CachedContent> {
    req = serializeGoogleCloudAiplatformV1CachedContent(req);
    opts = serializeProjectsLocationsCachedContentsPatchOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); }
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "PATCH", body });
    return deserializeGoogleCloudAiplatformV1CachedContent(data);
  }
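  // Update sketch: extending a cache entry's lifetime with a field mask. The
  // `ttl` and `expireTime` fields, and the "ttl" mask path, are assumptions about
  // GoogleCloudAiplatformV1CachedContent and how its update mask is interpreted.
  //
  //   const updated = await ai.projectsLocationsCachedContentsPatch(
  //     "projects/my-project/locations/us-central1/cachedContents/789",
  //     { ttl: "3600s" },
  //     { updateMask: "ttl" },
  //   );
  //   console.log(updated.expireTime);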
  /**
   * Given an input text, it returns a score that evaluates the factuality of
   * the text. It also extracts and returns claims from the text and provides
   * supporting facts.
   *
   * @param parent Required. The resource name of the Location from which to corroborate text. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`.
   */
  async projectsLocationsCorroborateContent(parent: string, req: GoogleCloudAiplatformV1CorroborateContentRequest): Promise<GoogleCloudAiplatformV1CorroborateContentResponse> {
    req = serializeGoogleCloudAiplatformV1CorroborateContentRequest(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }:corroborateContent`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleCloudAiplatformV1CorroborateContentResponse;
  }

  /**
   * Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob.
   * The server makes a best effort to cancel the job, but success is not
   * guaranteed. Clients can use JobService.GetCustomJob or other methods to
   * check whether the cancellation succeeded or whether the job completed
   * despite cancellation. On successful cancellation, the CustomJob is not
   * deleted; instead it becomes a job with a CustomJob.error value with a
   * google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and
   * CustomJob.state is set to `CANCELLED`.
   *
   * @param name Required. The name of the CustomJob to cancel. Format: `projects/{project}/locations/{location}/customJobs/{custom_job}`
   */
  async projectsLocationsCustomJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelCustomJobRequest): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Creates a CustomJob. A created CustomJob will immediately be attempted to
   * run.
   *
   * @param parent Required. The resource name of the Location to create the CustomJob in. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsCustomJobsCreate(parent: string, req: GoogleCloudAiplatformV1CustomJob): Promise<GoogleCloudAiplatformV1CustomJob> {
    req = serializeGoogleCloudAiplatformV1CustomJob(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/customJobs`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1CustomJob(data);
  }

  /**
   * Deletes a CustomJob.
   *
   * @param name Required. The name of the CustomJob resource to be deleted. Format: `projects/{project}/locations/{location}/customJobs/{custom_job}`
   */
  async projectsLocationsCustomJobsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a CustomJob.
   *
   * @param name Required. The name of the CustomJob resource. Format: `projects/{project}/locations/{location}/customJobs/{custom_job}`
   */
  async projectsLocationsCustomJobsGet(name: string): Promise<GoogleCloudAiplatformV1CustomJob> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1CustomJob(data);
  }

  /**
   * Lists CustomJobs in a Location.
   *
   * @param parent Required. The resource name of the Location to list the CustomJobs from. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsCustomJobsList(parent: string, opts: ProjectsLocationsCustomJobsListOptions = {}): Promise<GoogleCloudAiplatformV1ListCustomJobsResponse> {
    opts = serializeProjectsLocationsCustomJobsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/customJobs`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1ListCustomJobsResponse(data);
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsCustomJobsOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsCustomJobsOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsCustomJobsOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }
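  // Polling sketch: waiting for a long-running operation (for example, the one
  // returned by projectsLocationsCustomJobsDelete) by polling OperationsGet. The
  // `done`, `name`, and `error` fields follow the standard
  // google.longrunning.Operation shape as declared by GoogleLongrunningOperation
  // in this module.
  //
  //   let op = await ai.projectsLocationsCustomJobsDelete(
  //     "projects/my-project/locations/us-central1/customJobs/321",
  //   );
  //   while (!op.done) {
  //     await new Promise((resolve) => setTimeout(resolve, 5_000));
  //     op = await ai.projectsLocationsCustomJobsOperationsGet(op.name!);
  //   }
  //   if (op.error) throw new Error(op.error.message);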
  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
   */
  async projectsLocationsCustomJobsOperationsList(name: string, opts: ProjectsLocationsCustomJobsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ name }/operations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningListOperationsResponse;
  }

  /**
   * Waits until the specified long-running operation is done or reaches at
   * most a specified timeout, returning the latest state. If the operation is
   * already done, the latest state is immediately returned. If the timeout
   * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
   * timeout is used. If the server does not support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort
   * basis. It may return the latest state before the specified timeout
   * (including immediately), meaning even an immediate response is no guarantee
   * that the operation is done.
   *
   * @param name The name of the operation resource to wait on.
   */
  async projectsLocationsCustomJobsOperationsWait(name: string, opts: ProjectsLocationsCustomJobsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> {
    opts = serializeProjectsLocationsCustomJobsOperationsWaitOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }:wait`);
    if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); }
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Cancels a DataLabelingJob. Success of cancellation is not guaranteed.
   *
   * @param name Required. The name of the DataLabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`
   */
  async projectsLocationsDataLabelingJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelDataLabelingJobRequest): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Creates a DataLabelingJob.
   *
   * @param parent Required. The parent of the DataLabelingJob. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsDataLabelingJobsCreate(parent: string, req: GoogleCloudAiplatformV1DataLabelingJob): Promise<GoogleCloudAiplatformV1DataLabelingJob> {
    req = serializeGoogleCloudAiplatformV1DataLabelingJob(req);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/dataLabelingJobs`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return deserializeGoogleCloudAiplatformV1DataLabelingJob(data);
  }

  /**
   * Deletes a DataLabelingJob.
   *
   * @param name Required. The name of the DataLabelingJob to be deleted. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`
   */
  async projectsLocationsDataLabelingJobsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a DataLabelingJob.
   *
   * @param name Required. The name of the DataLabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`
   */
  async projectsLocationsDataLabelingJobsGet(name: string): Promise<GoogleCloudAiplatformV1DataLabelingJob> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1DataLabelingJob(data);
  }

  /**
   * Lists DataLabelingJobs in a Location.
   *
   * @param parent Required. The parent of the DataLabelingJob. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsDataLabelingJobsList(parent: string, opts: ProjectsLocationsDataLabelingJobsListOptions = {}): Promise<GoogleCloudAiplatformV1ListDataLabelingJobsResponse> {
    opts = serializeProjectsLocationsDataLabelingJobsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/dataLabelingJobs`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return deserializeGoogleCloudAiplatformV1ListDataLabelingJobsResponse(data);
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsDataLabelingJobsOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsDataLabelingJobsOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsDataLabelingJobsOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
   */
  async projectsLocationsDataLabelingJobsOperationsList(name: string, opts: ProjectsLocationsDataLabelingJobsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ name }/operations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningListOperationsResponse;
  }

  /**
   * Waits until the specified long-running operation is done or reaches at
   * most a specified timeout, returning the latest state. If the operation is
   * already done, the latest state is immediately returned. If the timeout
   * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
   * timeout is used. If the server does not support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort
   * basis. It may return the latest state before the specified timeout
   * (including immediately), meaning even an immediate response is no guarantee
   * that the operation is done.
   *
   * @param name The name of the operation resource to wait on.
   */
  async projectsLocationsDataLabelingJobsOperationsWait(name: string, opts: ProjectsLocationsDataLabelingJobsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> {
    opts = serializeProjectsLocationsDataLabelingJobsOperationsWaitOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }:wait`);
    if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); }
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets an AnnotationSpec.
   *
   * @param name Required. The name of the AnnotationSpec resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`
   */
  async projectsLocationsDatasetsAnnotationSpecsGet(name: string, opts: ProjectsLocationsDatasetsAnnotationSpecsGetOptions = {}): Promise<GoogleCloudAiplatformV1AnnotationSpec> {
    opts = serializeProjectsLocationsDatasetsAnnotationSpecsGetOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1AnnotationSpec;
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsDatasetsAnnotationSpecsOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsDatasetsAnnotationSpecsOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsDatasetsAnnotationSpecsOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
   */
  async projectsLocationsDatasetsAnnotationSpecsOperationsList(name: string, opts: ProjectsLocationsDatasetsAnnotationSpecsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ name }/operations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningListOperationsResponse;
  }

  /**
   * Waits until the specified long-running operation is done or reaches at
   * most a specified timeout, returning the latest state. If the operation is
   * already done, the latest state is immediately returned. If the timeout
   * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
   * timeout is used. If the server does not support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort
   * basis. It may return the latest state before the specified timeout
   * (including immediately), meaning even an immediate response is no guarantee
   * that the operation is done.
   *
   * @param name The name of the operation resource to wait on.
   */
  async projectsLocationsDatasetsAnnotationSpecsOperationsWait(name: string, opts: ProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> {
    opts = serializeProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }:wait`);
    if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); }
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleLongrunningOperation;
  }
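  // Wait sketch: blocking server-side (up to a timeout) instead of polling. How
  // the `timeout` option is expressed before serialization (here a Duration-style
  // string such as "60s") is an assumption about the Wait options types in this
  // module; the server may also return before the timeout with `done` still false.
  //
  //   const op = await ai.projectsLocationsDataLabelingJobsOperationsWait(
  //     "projects/my-project/locations/us-central1/dataLabelingJobs/111/operations/222",
  //     { timeout: "60s" },
  //   );
  //   console.log(op.done);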
  /**
   * Creates a Dataset.
   *
   * @param parent Required. The resource name of the Location to create the Dataset in. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsDatasetsCreate(parent: string, req: GoogleCloudAiplatformV1Dataset): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ parent }/datasets`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists Annotations belonging to a DataItem. This RPC is only available in
   * InternalDatasetService. It is only used for exporting conversation data to
   * CCAI Insights.
   *
   * @param parent Required. The resource name of the DataItem to list Annotations from. Format: `projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`
   */
  async projectsLocationsDatasetsDataItemsAnnotationsList(parent: string, opts: ProjectsLocationsDatasetsDataItemsAnnotationsListOptions = {}): Promise<GoogleCloudAiplatformV1ListAnnotationsResponse> {
    opts = serializeProjectsLocationsDatasetsDataItemsAnnotationsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/annotations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListAnnotationsResponse;
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsDatasetsDataItemsAnnotationsOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsDatasetsDataItemsAnnotationsOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsDatasetsDataItemsAnnotationsOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
   */
  async projectsLocationsDatasetsDataItemsAnnotationsOperationsList(name: string, opts: ProjectsLocationsDatasetsDataItemsAnnotationsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ name }/operations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningListOperationsResponse;
  }

  /**
   * Waits until the specified long-running operation is done or reaches at
   * most a specified timeout, returning the latest state. If the operation is
   * already done, the latest state is immediately returned. If the timeout
   * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
   * timeout is used. If the server does not support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort
   * basis. It may return the latest state before the specified timeout
   * (including immediately), meaning even an immediate response is no guarantee
   * that the operation is done.
   *
   * @param name The name of the operation resource to wait on.
   */
  async projectsLocationsDatasetsDataItemsAnnotationsOperationsWait(name: string, opts: ProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> {
    opts = serializeProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }:wait`);
    if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); }
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists DataItems in a Dataset.
   *
   * @param parent Required. The resource name of the Dataset to list DataItems from. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsDataItemsList(parent: string, opts: ProjectsLocationsDatasetsDataItemsListOptions = {}): Promise<GoogleCloudAiplatformV1ListDataItemsResponse> {
    opts = serializeProjectsLocationsDatasetsDataItemsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/dataItems`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListDataItemsResponse;
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsDatasetsDataItemsOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsDatasetsDataItemsOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsDatasetsDataItemsOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
   */
  async projectsLocationsDatasetsDataItemsOperationsList(name: string, opts: ProjectsLocationsDatasetsDataItemsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ name }/operations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningListOperationsResponse;
  }

  /**
   * Waits until the specified long-running operation is done or reaches at
   * most a specified timeout, returning the latest state. If the operation is
   * already done, the latest state is immediately returned. If the timeout
   * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
   * timeout is used. If the server does not support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort
   * basis. It may return the latest state before the specified timeout
   * (including immediately), meaning even an immediate response is no guarantee
   * that the operation is done.
   *
   * @param name The name of the operation resource to wait on.
   */
  async projectsLocationsDatasetsDataItemsOperationsWait(name: string, opts: ProjectsLocationsDatasetsDataItemsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> {
    opts = serializeProjectsLocationsDatasetsDataItemsOperationsWaitOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }:wait`);
    if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); }
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Create a version from a Dataset.
   *
   * @param parent Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsDatasetVersionsCreate(parent: string, req: GoogleCloudAiplatformV1DatasetVersion): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Deletes a Dataset version.
   *
   * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`
   */
  async projectsLocationsDatasetsDatasetVersionsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a Dataset version.
   *
   * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`
   */
  async projectsLocationsDatasetsDatasetVersionsGet(name: string, opts: ProjectsLocationsDatasetsDatasetVersionsGetOptions = {}): Promise<GoogleCloudAiplatformV1DatasetVersion> {
    opts = serializeProjectsLocationsDatasetsDatasetVersionsGetOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1DatasetVersion;
  }

  /**
   * Lists DatasetVersions in a Dataset.
   *
   * @param parent Required. The resource name of the Dataset to list DatasetVersions from. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsDatasetVersionsList(parent: string, opts: ProjectsLocationsDatasetsDatasetVersionsListOptions = {}): Promise<GoogleCloudAiplatformV1ListDatasetVersionsResponse> {
    opts = serializeProjectsLocationsDatasetsDatasetVersionsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListDatasetVersionsResponse;
  }

  /**
   * Updates a DatasetVersion.
   *
   * @param name Output only. Identifier. The resource name of the DatasetVersion.
   */
  async projectsLocationsDatasetsDatasetVersionsPatch(name: string, req: GoogleCloudAiplatformV1DatasetVersion, opts: ProjectsLocationsDatasetsDatasetVersionsPatchOptions = {}): Promise<GoogleCloudAiplatformV1DatasetVersion> {
    opts = serializeProjectsLocationsDatasetsDatasetVersionsPatchOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); }
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "PATCH", body });
    return data as GoogleCloudAiplatformV1DatasetVersion;
  }

  /**
   * Restores a dataset version.
   *
   * @param name Required. The name of the DatasetVersion resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`
   */
  async projectsLocationsDatasetsDatasetVersionsRestore(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:restore`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Deletes a Dataset.
   *
   * @param name Required. The resource name of the Dataset to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Exports data from a Dataset.
   *
   * @param name Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsExport(name: string, req: GoogleCloudAiplatformV1ExportDataRequest): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:export`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a Dataset.
   *
   * @param name Required. The name of the Dataset resource.
   */
  async projectsLocationsDatasetsGet(name: string, opts: ProjectsLocationsDatasetsGetOptions = {}): Promise<GoogleCloudAiplatformV1Dataset> {
    opts = serializeProjectsLocationsDatasetsGetOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1Dataset;
  }

  /**
   * Imports data into a Dataset.
   *
   * @param name Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsImport(name: string, req: GoogleCloudAiplatformV1ImportDataRequest): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:import`);
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "POST", body });
    return data as GoogleLongrunningOperation;
  }
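  // Import sketch: importing data from Cloud Storage into an existing Dataset.
  // The `importConfigs`, `gcsSource`, and `importSchemaUri` fields are assumptions
  // about GoogleCloudAiplatformV1ImportDataRequest, and the schema URI is only an
  // example value; the call returns a long-running operation.
  //
  //   const importOp = await ai.projectsLocationsDatasetsImport(
  //     "projects/my-project/locations/us-central1/datasets/123",
  //     {
  //       importConfigs: [{
  //         gcsSource: { uris: ["gs://my-bucket/data.jsonl"] },
  //         importSchemaUri: "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml",
  //       }],
  //     },
  //   );
  //   console.log(importOp.name);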
  /**
   * Lists Datasets in a Location.
   *
   * @param parent Required. The name of the Dataset's parent resource. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsDatasetsList(parent: string, opts: ProjectsLocationsDatasetsListOptions = {}): Promise<GoogleCloudAiplatformV1ListDatasetsResponse> {
    opts = serializeProjectsLocationsDatasetsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/datasets`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListDatasetsResponse;
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsDatasetsOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsDatasetsOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsDatasetsOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
   */
  async projectsLocationsDatasetsOperationsList(name: string, opts: ProjectsLocationsDatasetsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> {
    const url = new URL(`${this.#baseUrl}v1/${ name }/operations`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningListOperationsResponse;
  }

  /**
   * Waits until the specified long-running operation is done or reaches at
   * most a specified timeout, returning the latest state. If the operation is
   * already done, the latest state is immediately returned. If the timeout
   * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
   * timeout is used. If the server does not support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort
   * basis. It may return the latest state before the specified timeout
   * (including immediately), meaning even an immediate response is no guarantee
   * that the operation is done.
   *
   * @param name The name of the operation resource to wait on.
   */
  async projectsLocationsDatasetsOperationsWait(name: string, opts: ProjectsLocationsDatasetsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> {
    opts = serializeProjectsLocationsDatasetsOperationsWaitOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }:wait`);
    if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); }
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Updates a Dataset.
   *
   * @param name Output only. Identifier. The resource name of the Dataset.
   */
  async projectsLocationsDatasetsPatch(name: string, req: GoogleCloudAiplatformV1Dataset, opts: ProjectsLocationsDatasetsPatchOptions = {}): Promise<GoogleCloudAiplatformV1Dataset> {
    opts = serializeProjectsLocationsDatasetsPatchOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); }
    const body = JSON.stringify(req);
    const data = await request(url.href, { client: this.#client, method: "PATCH", body });
    return data as GoogleCloudAiplatformV1Dataset;
  }

  /**
   * Deletes a SavedQuery.
   *
   * @param name Required. The resource name of the SavedQuery to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/savedQueries/{saved_query}`
   */
  async projectsLocationsDatasetsSavedQueriesDelete(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists SavedQueries in a Dataset.
   *
   * @param parent Required. The resource name of the Dataset to list SavedQueries from. Format: `projects/{project}/locations/{location}/datasets/{dataset}`
   */
  async projectsLocationsDatasetsSavedQueriesList(parent: string, opts: ProjectsLocationsDatasetsSavedQueriesListOptions = {}): Promise<GoogleCloudAiplatformV1ListSavedQueriesResponse> {
    opts = serializeProjectsLocationsDatasetsSavedQueriesListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/savedQueries`);
    if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); }
    if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); }
    if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); }
    if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); }
    if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); }
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleCloudAiplatformV1ListSavedQueriesResponse;
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of `1`, corresponding
   * to `Code.CANCELLED`.
   *
   * @param name The name of the operation resource to be cancelled.
   */
  async projectsLocationsDatasetsSavedQueriesOperationsCancel(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`);
    const data = await request(url.href, { client: this.#client, method: "POST" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
   *
   * @param name The name of the operation resource to be deleted.
   */
  async projectsLocationsDatasetsSavedQueriesOperationsDelete(name: string): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "DELETE" });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * @param name The name of the operation resource.
   */
  async projectsLocationsDatasetsSavedQueriesOperationsGet(name: string): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, { client: this.#client, method: "GET" });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns `UNIMPLEMENTED`.
   *
   * @param name The name of the operation's parent resource.
*/ async projectsLocationsDatasetsSavedQueriesOperationsList(name: string, opts: ProjectsLocationsDatasetsSavedQueriesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDatasetsSavedQueriesOperationsWait(name: string, opts: ProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Searches DataItems in a Dataset. * * @param dataset Required. The resource name of the Dataset from which to search DataItems. 
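 *
 * A sketch of the data-item search documented here, using only options declared
 * in ProjectsLocationsDatasetsSearchDataItemsOptions. The filter and ordering
 * strings are illustrative values, and the `dataItemViews` response field is an
 * assumption about GoogleCloudAiplatformV1SearchDataItemsResponse.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const dataset = "projects/my-project/locations/us-central1/datasets/123";
 *     const res = await ai.projectsLocationsDatasetsSearchDataItems(dataset, {
 *       savedQuery: `${dataset}/savedQueries/456`,
 *       dataItemFilter: "labeling_state = LABELED", // illustrative filter only
 *       orderByDataItem: "creation_time desc",      // illustrative ordering only
 *       pageSize: 25,
 *     });
 *     console.log(res.dataItemViews?.length ?? 0, "data items");
 *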
Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsSearchDataItems(dataset: string, opts: ProjectsLocationsDatasetsSearchDataItemsOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsSearchDataItemsOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ dataset }:searchDataItems`); if (opts.annotationFilters !== undefined) { url.searchParams.append("annotationFilters", String(opts.annotationFilters)); } if (opts.annotationsFilter !== undefined) { url.searchParams.append("annotationsFilter", String(opts.annotationsFilter)); } if (opts.annotationsLimit !== undefined) { url.searchParams.append("annotationsLimit", String(opts.annotationsLimit)); } if (opts.dataItemFilter !== undefined) { url.searchParams.append("dataItemFilter", String(opts.dataItemFilter)); } if (opts.dataLabelingJob !== undefined) { url.searchParams.append("dataLabelingJob", String(opts.dataLabelingJob)); } if (opts.fieldMask !== undefined) { url.searchParams.append("fieldMask", String(opts.fieldMask)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts["orderByAnnotation.orderBy"] !== undefined) { url.searchParams.append("orderByAnnotation.orderBy", String(opts["orderByAnnotation.orderBy"])); } if (opts["orderByAnnotation.savedQuery"] !== undefined) { url.searchParams.append("orderByAnnotation.savedQuery", String(opts["orderByAnnotation.savedQuery"])); } if (opts.orderByDataItem !== undefined) { url.searchParams.append("orderByDataItem", String(opts.orderByDataItem)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.savedQuery !== undefined) { url.searchParams.append("savedQuery", String(opts.savedQuery)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1SearchDataItemsResponse; } /** * Create a DeploymentResourcePool. * * @param parent Required. The parent location resource where this DeploymentResourcePool will be created. Format: `projects/{project}/locations/{location}` */ async projectsLocationsDeploymentResourcePoolsCreate(parent: string, req: GoogleCloudAiplatformV1CreateDeploymentResourcePoolRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/deploymentResourcePools`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Delete a DeploymentResourcePool. * * @param name Required. The name of the DeploymentResourcePool to delete. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Get a DeploymentResourcePool. * * @param name Required. The name of the DeploymentResourcePool to retrieve. 
Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1DeploymentResourcePool; } /** * List DeploymentResourcePools in a location. * * @param parent Required. The parent Location which owns this collection of DeploymentResourcePools. Format: `projects/{project}/locations/{location}` */ async projectsLocationsDeploymentResourcePoolsList(parent: string, opts: ProjectsLocationsDeploymentResourcePoolsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/deploymentResourcePools`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDeploymentResourcePoolsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDeploymentResourcePoolsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDeploymentResourcePoolsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDeploymentResourcePoolsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsDeploymentResourcePoolsOperationsList(name: string, opts: ProjectsLocationsDeploymentResourcePoolsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDeploymentResourcePoolsOperationsWait(name: string, opts: ProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Update a DeploymentResourcePool. * * @param name Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsPatch(name: string, req: GoogleCloudAiplatformV1DeploymentResourcePool, opts: ProjectsLocationsDeploymentResourcePoolsPatchOptions = {}): Promise { opts = serializeProjectsLocationsDeploymentResourcePoolsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * List DeployedModels that have been deployed on this * DeploymentResourcePool. * * @param deploymentResourcePool Required. The name of the target DeploymentResourcePool to query. 
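 *
 * A sketch of asking which DeployedModels share a pool, via the query call
 * documented here. Only `pageSize`/`pageToken` come from this file; the
 * `deployedModelRefs` field is an assumed part of
 * GoogleCloudAiplatformV1QueryDeployedModelsResponse.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const pool = "projects/my-project/locations/us-central1/deploymentResourcePools/shared-pool";
 *     const res = await ai.projectsLocationsDeploymentResourcePoolsQueryDeployedModels(pool, { pageSize: 20 });
 *     console.log(res.deployedModelRefs ?? res);
 *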
Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsQueryDeployedModels(deploymentResourcePool: string, opts: ProjectsLocationsDeploymentResourcePoolsQueryDeployedModelsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ deploymentResourcePool }:queryDeployedModels`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1QueryDeployedModelsResponse; } /** * Exposes an OpenAI-compatible endpoint for chat completions. * * @param endpoint Required. The name of the endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsChatCompletions(endpoint: string, req: GoogleApiHttpBody): Promise { req = serializeGoogleApiHttpBody(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }/chat/completions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. */ async projectsLocationsEndpointsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Creates an Endpoint. * * @param parent Required. The resource name of the Location to create the Endpoint in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEndpointsCreate(parent: string, req: GoogleCloudAiplatformV1Endpoint, opts: ProjectsLocationsEndpointsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Endpoint(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/endpoints`); if (opts.endpointId !== undefined) { url.searchParams.append("endpointId", String(opts.endpointId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes an Endpoint. * * @param name Required. The name of the Endpoint resource to be deleted. 
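 *
 * A create-then-delete sketch for Endpoints, combining
 * projectsLocationsEndpointsCreate (defined above) with the delete call
 * documented here. Both return long-running operations; the `displayName`
 * field on the Endpoint and the operation's `name` field are assumptions not
 * shown in this excerpt.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const parent = "projects/my-project/locations/us-central1";
 *     const createOp = await ai.projectsLocationsEndpointsCreate(
 *       parent,
 *       { displayName: "scratch-endpoint" },      // assumed Endpoint field
 *       { endpointId: "my-scratch-endpoint" },
 *     );
 *     console.log("creation started:", createOp.name);
 *     // ...later, remove it again (also a long-running operation):
 *     await ai.projectsLocationsEndpointsDelete(`${parent}/endpoints/my-scratch-endpoint`);
 *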
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Deploys a Model into this Endpoint, creating a DeployedModel within it. * * @param endpoint Required. The name of the Endpoint resource into which to deploy a Model. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDeployModel(endpoint: string, req: GoogleCloudAiplatformV1DeployModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:deployModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Perform an unary online prediction request to a gRPC model server for * Vertex first-party products and frameworks. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDirectPredict(endpoint: string, req: GoogleCloudAiplatformV1DirectPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1DirectPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:directPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1DirectPredictResponse(data); } /** * Perform an unary online prediction request to a gRPC model server for * custom containers. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDirectRawPredict(endpoint: string, req: GoogleCloudAiplatformV1DirectRawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1DirectRawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:directRawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1DirectRawPredictResponse(data); } /** * Perform an online explanation. If deployed_model_id is specified, the * corresponding DeployModel must have explanation_spec populated. If * deployed_model_id is not specified, all DeployedModels must have * explanation_spec populated. * * @param endpoint Required. The name of the Endpoint requested to serve the explanation. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsExplain(endpoint: string, req: GoogleCloudAiplatformV1ExplainRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:explain`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1ExplainResponse; } /** * Fetch an asynchronous online prediction operation. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
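 *
 * A sketch pairing projectsLocationsEndpointsPredictLongRunning (defined later
 * in this class) with the fetch call documented here to poll an asynchronous
 * prediction. The request fields `instances` and `operationName` are assumed
 * shapes for the request messages, which are not part of this excerpt.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const endpoint = "projects/my-project/locations/us-central1/endpoints/123";
 *     const op = await ai.projectsLocationsEndpointsPredictLongRunning(endpoint, {
 *       instances: [{ prompt: "a watercolor fox" }],   // assumed request shape
 *     });
 *     const latest = await ai.projectsLocationsEndpointsFetchPredictOperation(endpoint, {
 *       operationName: op.name!,                       // assumed request field
 *     });
 *     if (latest.done) console.log("prediction finished");
 *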
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` */ async projectsLocationsEndpointsFetchPredictOperation(endpoint: string, req: GoogleCloudAiplatformV1FetchPredictOperationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:fetchPredictOperation`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Gets an Endpoint. * * @param name Required. The name of the Endpoint resource. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Endpoint(data); } /** * Lists Endpoints in a Location. * * @param parent Required. The resource name of the Location from which to list the Endpoints. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEndpointsList(parent: string, opts: ProjectsLocationsEndpointsListOptions = {}): Promise { opts = serializeProjectsLocationsEndpointsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/endpoints`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListEndpointsResponse(data); } /** * Updates an existing deployed model. Updatable fields include * `min_replica_count`, `max_replica_count`, `autoscaling_metric_specs`, * `disable_container_logging` (v1 only), and `enable_container_logging` * (v1beta1 only). * * @param endpoint Required. The name of the Endpoint resource into which to mutate a DeployedModel. 
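 *
 * A sketch for projectsLocationsEndpointsGenerateContent (defined above),
 * using a publisher model name in the documented format; the model id itself
 * is illustrative. The request body (`contents` with `role`/`parts`/`text`)
 * and the `candidates` field read from the response are assumptions about the
 * GenerateContent messages, not definitions from this excerpt.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const model =
 *       "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-flash"; // illustrative model id
 *     const res = await ai.projectsLocationsEndpointsGenerateContent(model, {
 *       contents: [{ role: "user", parts: [{ text: "Summarize Vertex AI in one sentence." }] }], // assumed request shape
 *     });
 *     console.log(res.candidates?.[0]?.content?.parts?.[0]?.text);
 *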
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsMutateDeployedModel(endpoint: string, req: GoogleCloudAiplatformV1MutateDeployedModelRequest): Promise { req = serializeGoogleCloudAiplatformV1MutateDeployedModelRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:mutateDeployedModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsEndpointsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsEndpointsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsEndpointsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsEndpointsOperationsList(name: string, opts: ProjectsLocationsEndpointsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. 
If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsEndpointsOperationsWait(name: string, opts: ProjectsLocationsEndpointsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsEndpointsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an Endpoint. * * @param name Output only. The resource name of the Endpoint. */ async projectsLocationsEndpointsPatch(name: string, req: GoogleCloudAiplatformV1Endpoint, opts: ProjectsLocationsEndpointsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Endpoint(req); opts = serializeProjectsLocationsEndpointsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1Endpoint(data); } /** * Perform an online prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsPredict(endpoint: string, req: GoogleCloudAiplatformV1PredictRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1PredictResponse; } async projectsLocationsEndpointsPredictLongRunning(endpoint: string, req: GoogleCloudAiplatformV1PredictLongRunningRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predictLongRunning`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Perform an online prediction with an arbitrary HTTP payload. The response * includes the following HTTP headers: * `X-Vertex-AI-Endpoint-Id`: ID of the * Endpoint that served this prediction. * `X-Vertex-AI-Deployed-Model-Id`: ID * of the Endpoint's DeployedModel that served this prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
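 *
 * A sketch for the plain projectsLocationsEndpointsPredict call defined above
 * (the raw variant documented here takes an arbitrary HTTP body instead).
 * The `instances` and `parameters` request fields and the `predictions`
 * response field are assumed message shapes, not declarations from this
 * excerpt.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const endpoint = "projects/my-project/locations/us-central1/endpoints/123";
 *     const res = await ai.projectsLocationsEndpointsPredict(endpoint, {
 *       instances: [{ feature_a: 1.0, feature_b: "blue" }], // assumed instance encoding
 *       parameters: {},
 *     });
 *     console.log(res.predictions);
 *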
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsRawPredict(endpoint: string, req: GoogleCloudAiplatformV1RawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1RawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:rawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Perform a server-side streaming online prediction request for Vertex LLM * streaming. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsServerStreamingPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamingPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamingPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:serverStreamingPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1StreamingPredictResponse(data); } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform a streaming online prediction with an arbitrary HTTP payload. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsStreamRawPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamRawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamRawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:streamRawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Undeploys a Model from an Endpoint, removing a DeployedModel from it, and * freeing all resources it's using. * * @param endpoint Required. The name of the Endpoint resource from which to undeploy a Model. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsUndeployModel(endpoint: string, req: GoogleCloudAiplatformV1UndeployModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:undeployModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Updates an Endpoint with a long running operation. * * @param name Output only. The resource name of the Endpoint. 
*/ async projectsLocationsEndpointsUpdate(name: string, req: GoogleCloudAiplatformV1UpdateEndpointLongRunningRequest): Promise { req = serializeGoogleCloudAiplatformV1UpdateEndpointLongRunningRequest(req); const url = new URL(`${this.#baseUrl}v1/${ name }:update`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Evaluates instances based on a given metric. * * @param location Required. The resource name of the Location to evaluate the instances. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEvaluateInstances(location: string, req: GoogleCloudAiplatformV1EvaluateInstancesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ location }:evaluateInstances`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1EvaluateInstancesResponse; } /** * Creates a new FeatureGroup in a given project and location. * * @param parent Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureGroupsCreate(parent: string, req: GoogleCloudAiplatformV1FeatureGroup, opts: ProjectsLocationsFeatureGroupsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureGroups`); if (opts.featureGroupId !== undefined) { url.searchParams.append("featureGroupId", String(opts.featureGroupId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single FeatureGroup. * * @param name Required. The name of the FeatureGroup to be deleted. Format: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsDelete(name: string, opts: ProjectsLocationsFeatureGroupsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Creates a batch of Features in a given FeatureGroup. * * @param parent Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateFeaturesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new Feature in a given FeatureGroup. * * @param parent Required. The resource name of the EntityType or FeatureGroup to create a Feature. 
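 *
 * A sketch chaining projectsLocationsFeatureGroupsCreate (defined above) with
 * the Feature creation call documented here. The FeatureGroup's `bigQuery`
 * source and the Feature's `description` are assumed fields; both calls return
 * long-running operations that should complete before the resources are used.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const parent = "projects/my-project/locations/us-central1";
 *     await ai.projectsLocationsFeatureGroupsCreate(
 *       parent,
 *       { bigQuery: { bigQuerySource: { inputUri: "bq://my-project.features.users" } } }, // assumed fields
 *       { featureGroupId: "users" },
 *     );
 *     await ai.projectsLocationsFeatureGroupsFeaturesCreate(
 *       `${parent}/featureGroups/users`,
 *       { description: "age of the user" },   // assumed Feature field
 *       { featureId: "age" },
 *     );
 *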
Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesCreate(parent: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeatureGroupsFeaturesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.featureId !== undefined) { url.searchParams.append("featureId", String(opts.featureId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single Feature. * * @param name Required. The name of the Features to be deleted. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` */ async projectsLocationsFeatureGroupsFeaturesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets details of a single Feature. * * @param name Required. The name of the Feature resource. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Feature; } /** * Lists Features in a given FeatureGroup. * * @param parent Required. The resource name of the Location to list Features. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesList(parent: string, opts: ProjectsLocationsFeatureGroupsFeaturesListOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsFeaturesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.latestStatsCount !== undefined) { url.searchParams.append("latestStatsCount", String(opts.latestStatsCount)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeaturesResponse; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. 
If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureGroupsFeaturesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureGroupsFeaturesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureGroupsFeaturesOperationsListWait(name: string, opts: ProjectsLocationsFeatureGroupsFeaturesOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeatureGroupsFeaturesOperationsWait(name: string, opts: ProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single Feature. * * @param name Immutable. Name of the Feature. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. 
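 *
 * A sketch for the patch call documented here: update only the description by
 * naming it in `updateMask`. The mask option is declared in
 * ProjectsLocationsFeatureGroupsFeaturesPatchOptions; the `description` field
 * on GoogleCloudAiplatformV1Feature is an assumption.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const feature =
 *       "projects/my-project/locations/us-central1/featureGroups/users/features/age";
 *     await ai.projectsLocationsFeatureGroupsFeaturesPatch(
 *       feature,
 *       { description: "age of the user, in full years" }, // assumed Feature field
 *       { updateMask: "description" },
 *     );
 *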
*/ async projectsLocationsFeatureGroupsFeaturesPatch(name: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeatureGroupsFeaturesPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsFeaturesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Gets details of a single FeatureGroup. * * @param name Required. The name of the FeatureGroup resource. */ async projectsLocationsFeatureGroupsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1FeatureGroup; } /** * Lists FeatureGroups in a given project and location. * * @param parent Required. The resource name of the Location to list FeatureGroups. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureGroupsList(parent: string, opts: ProjectsLocationsFeatureGroupsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureGroups`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeatureGroupsResponse; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureGroupsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureGroupsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsFeatureGroupsOperationsListWait(name: string, opts: ProjectsLocationsFeatureGroupsOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeatureGroupsOperationsWait(name: string, opts: ProjectsLocationsFeatureGroupsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single FeatureGroup. * * @param name Identifier. Name of the FeatureGroup. Format: `projects/{project}/locations/{location}/featureGroups/{featureGroup}` */ async projectsLocationsFeatureGroupsPatch(name: string, req: GoogleCloudAiplatformV1FeatureGroup, opts: ProjectsLocationsFeatureGroupsPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new FeatureOnlineStore in a given project and location. * * @param parent Required. The resource name of the Location to create FeatureOnlineStores. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureOnlineStoresCreate(parent: string, req: GoogleCloudAiplatformV1FeatureOnlineStore, opts: ProjectsLocationsFeatureOnlineStoresCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureOnlineStores`); if (opts.featureOnlineStoreId !== undefined) { url.searchParams.append("featureOnlineStoreId", String(opts.featureOnlineStoreId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single FeatureOnlineStore. The FeatureOnlineStore must not * contain any FeatureViews. * * @param name Required. The name of the FeatureOnlineStore to be deleted. 
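 *
 * A sketch pairing projectsLocationsFeatureOnlineStoresCreate (defined above)
 * with the delete call documented here. The `optimized: {}` storage choice is
 * an assumed FeatureOnlineStore field, and `force: true` is presumably what
 * allows deleting a store that still holds FeatureViews; neither is defined in
 * this excerpt.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const parent = "projects/my-project/locations/us-central1";
 *     await ai.projectsLocationsFeatureOnlineStoresCreate(
 *       parent,
 *       { optimized: {} },                         // assumed serving-config field
 *       { featureOnlineStoreId: "serving-store" },
 *     );
 *     // ...and to tear it down later:
 *     await ai.projectsLocationsFeatureOnlineStoresDelete(
 *       `${parent}/featureOnlineStores/serving-store`,
 *       { force: true },
 *     );
 *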
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}` */ async projectsLocationsFeatureOnlineStoresDelete(name: string, opts: ProjectsLocationsFeatureOnlineStoresDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Creates a new FeatureView in a given FeatureOnlineStore. * * @param parent Required. The resource name of the FeatureOnlineStore to create FeatureViews. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsCreate(parent: string, req: GoogleCloudAiplatformV1FeatureView, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1FeatureView(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/featureViews`); if (opts.featureViewId !== undefined) { url.searchParams.append("featureViewId", String(opts.featureViewId)); } if (opts.runSyncImmediately !== undefined) { url.searchParams.append("runSyncImmediately", String(opts.runSyncImmediately)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single FeatureView. * * @param name Required. The name of the FeatureView to be deleted. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets details of a single FeatureViewSync. * * @param name Required. The name of the FeatureViewSync resource. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1FeatureViewSync; } /** * Lists FeatureViewSyncs in a given FeatureView. * * @param parent Required. The resource name of the FeatureView to list FeatureViewSyncs. 
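 *
 * A sketch that creates a FeatureView with an immediate sync (create method
 * above) and then lists its FeatureViewSyncs with the call documented here.
 * The `featureRegistrySource` shape on GoogleCloudAiplatformV1FeatureView and
 * the `featureViewSyncs` response field are assumptions; the create returns a
 * long-running operation that should finish before syncs appear.
 *
 *     declare const ai: AIplatform; // constructed as in the dataset listing sketch
 *     const store = "projects/my-project/locations/us-central1/featureOnlineStores/serving-store";
 *     await ai.projectsLocationsFeatureOnlineStoresFeatureViewsCreate(
 *       store,
 *       { featureRegistrySource: { featureGroups: [{ featureGroupId: "users", featureIds: ["age"] }] } }, // assumed fields
 *       { featureViewId: "users-view", runSyncImmediately: true },
 *     );
 *     const syncs = await ai.projectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsList(
 *       `${store}/featureViews/users-view`,
 *       { orderBy: "create_time desc", pageSize: 5 }, // illustrative ordering only
 *     );
 *     console.log(syncs.featureViewSyncs);            // assumed response field
 *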
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsList(parent: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureViewSyncs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeatureViewSyncsResponse; } /** * Fetch feature values under a FeatureView. * * @param featureView Required. FeatureView resource format `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}/featureViews/{featureView}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsFetchFeatureValues(featureView: string, req: GoogleCloudAiplatformV1FetchFeatureValuesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ featureView }:fetchFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data); } /** * Gets details of a single FeatureView. * * @param name Required. The name of the FeatureView resource. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1FeatureView(data); } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresFeatureViewsGetIamPolicy(resource: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists FeatureViews in a given FeatureOnlineStore. * * @param parent Required. The resource name of the FeatureOnlineStore to list FeatureViews. 
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsList(parent: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureViews`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListFeatureViewsResponse(data); } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsListWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
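 *
 * A minimal usage sketch (hypothetical, not part of the generated client):
 * `credentialsClient` is assumed to be an already-configured CredentialsClient
 * and `operationName` an existing operation resource name; the `timeout`
 * option is assumed to follow the representation declared by the generated
 * WaitOptions type.
 *
 *     const ai = new AIplatform(credentialsClient);
 *     // Block for up to the requested timeout, then inspect the latest state.
 *     const op = await ai.projectsLocationsFeatureOnlineStoresFeatureViewsOperationsWait(
 *       operationName,
 *       { timeout: 30 }, // assumption: seconds; check the generated WaitOptions type
 *     );
 *     if (!op.done) {
 *       // Best effort only: the operation may still be running after the call returns.
 *     }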
*/ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single FeatureView. * * @param name Identifier. Name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsPatch(name: string, req: GoogleCloudAiplatformV1FeatureView, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1FeatureView(req); opts = serializeProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Search the nearest entities under a FeatureView. Search only works for * indexable feature view; if a feature view isn't indexable, returns Invalid * argument response. * * @param featureView Required. FeatureView resource format `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}/featureViews/{featureView}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsSearchNearestEntities(featureView: string, req: GoogleCloudAiplatformV1SearchNearestEntitiesRequest): Promise { req = serializeGoogleCloudAiplatformV1SearchNearestEntitiesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ featureView }:searchNearestEntities`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1SearchNearestEntitiesResponse(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresFeatureViewsSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Triggers on-demand sync for the FeatureView. * * @param featureView Required. 
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsSync(featureView: string, req: GoogleCloudAiplatformV1SyncFeatureViewRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ featureView }:sync`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1SyncFeatureViewResponse; } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresFeatureViewsTestIamPermissions(resource: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Gets details of a single FeatureOnlineStore. * * @param name Required. The name of the FeatureOnlineStore resource. */ async projectsLocationsFeatureOnlineStoresGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1FeatureOnlineStore; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresGetIamPolicy(resource: string, opts: ProjectsLocationsFeatureOnlineStoresGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists FeatureOnlineStores in a given project and location. * * @param parent Required. The resource name of the Location to list FeatureOnlineStores. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureOnlineStoresList(parent: string, opts: ProjectsLocationsFeatureOnlineStoresListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureOnlineStores`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeatureOnlineStoresResponse; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureOnlineStoresOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureOnlineStoresOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureOnlineStoresOperationsListWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
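 *
 * Because even an immediate return is no completion guarantee, callers
 * usually inspect the returned operation. A hypothetical sketch (assuming
 * `credentialsClient` and `operationName` are defined elsewhere):
 *
 *     const ai = new AIplatform(credentialsClient);
 *     const op = await ai.projectsLocationsFeatureOnlineStoresOperationsWait(operationName);
 *     if (op.done && op.error) {
 *       console.error("operation failed:", op.error.message);
 *     } else if (op.done) {
 *       console.log("operation finished:", op.response);
 *     } else {
 *       console.log("still running; wait again or poll with the operations get method");
 *     }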
*/ async projectsLocationsFeatureOnlineStoresOperationsWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureOnlineStoresOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single FeatureOnlineStore. * * @param name Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` */ async projectsLocationsFeatureOnlineStoresPatch(name: string, req: GoogleCloudAiplatformV1FeatureOnlineStore, opts: ProjectsLocationsFeatureOnlineStoresPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeatureOnlineStoresPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresTestIamPermissions(resource: string, opts: ProjectsLocationsFeatureOnlineStoresTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Batch reads Feature values from a Featurestore. This API enables batch * reading Feature values, where each read instance in the batch may read * Feature values of entities from one or more EntityTypes. Point-in-time * correctness is guaranteed for Feature values of each read instance as of * each instance's read timestamp. * * @param featurestore Required. 
The resource name of the Featurestore from which to query Feature values. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresBatchReadFeatureValues(featurestore: string, req: GoogleCloudAiplatformV1BatchReadFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1BatchReadFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ featurestore }:batchReadFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new Featurestore in a given project and location. * * @param parent Required. The resource name of the Location to create Featurestores. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeaturestoresCreate(parent: string, req: GoogleCloudAiplatformV1Featurestore, opts: ProjectsLocationsFeaturestoresCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featurestores`); if (opts.featurestoreId !== undefined) { url.searchParams.append("featurestoreId", String(opts.featurestoreId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single Featurestore. The Featurestore must not contain any * EntityTypes or `force` must be set to true for the request to succeed. * * @param name Required. The name of the Featurestore to be deleted. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresDelete(name: string, opts: ProjectsLocationsFeaturestoresDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Creates a new EntityType in a given Featurestore. * * @param parent Required. The resource name of the Featurestore to create EntityTypes. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresEntityTypesCreate(parent: string, req: GoogleCloudAiplatformV1EntityType, opts: ProjectsLocationsFeaturestoresEntityTypesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/entityTypes`); if (opts.entityTypeId !== undefined) { url.searchParams.append("entityTypeId", String(opts.entityTypeId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single EntityType. The EntityType must not have any Features or * `force` must be set to true for the request to succeed. * * @param name Required. The name of the EntityType to be deleted. 
Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` */ async projectsLocationsFeaturestoresEntityTypesDelete(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Delete Feature values from Featurestore. The progress of the deletion is * tracked by the returned operation. The deleted feature values are * guaranteed to be invisible to subsequent read operations after the * operation is marked as successfully done. If a delete feature values * operation fails, the feature values returned from reads and exports may be * inconsistent. If consistency is required, the caller must retry the same * delete request again and wait till the new operation returned is marked as * successfully done. * * @param entityType Required. The resource name of the EntityType grouping the Features for which values are being deleted from. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}` */ async projectsLocationsFeaturestoresEntityTypesDeleteFeatureValues(entityType: string, req: GoogleCloudAiplatformV1DeleteFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:deleteFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Exports Feature values from all the entities of a target EntityType. * * @param entityType Required. The resource name of the EntityType from which to export Feature values. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` */ async projectsLocationsFeaturestoresEntityTypesExportFeatureValues(entityType: string, req: GoogleCloudAiplatformV1ExportFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1ExportFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:exportFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a batch of Features in a given EntityType. * * @param parent Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateFeaturesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new Feature in a given EntityType. * * @param parent Required. The resource name of the EntityType or FeatureGroup to create a Feature. 
Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesCreate(parent: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.featureId !== undefined) { url.searchParams.append("featureId", String(opts.featureId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single Feature. * * @param name Required. The name of the Features to be deleted. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets details of a single Feature. * * @param name Required. The name of the Feature resource. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Feature; } /** * Lists Features in a given EntityType. * * @param parent Required. The resource name of the Location to list Features. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesList(parent: string, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.latestStatsCount !== undefined) { url.searchParams.append("latestStatsCount", String(opts.latestStatsCount)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeaturesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. 
* If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsList(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
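 *
 * For example, projectsLocationsFeaturestoresEntityTypesFeaturesCreate above
 * returns a long-running operation whose name can then be awaited here. A
 * hypothetical sketch (assumes `credentialsClient`, an existing EntityType
 * resource name in `entityTypeName`, and that the returned operation name is
 * accepted by this wait method):
 *
 *     const ai = new AIplatform(credentialsClient);
 *     const createOp = await ai.projectsLocationsFeaturestoresEntityTypesFeaturesCreate(
 *       entityTypeName,
 *       { valueType: "DOUBLE" },     // Feature payload; valueType is a standard Feature field
 *       { featureId: "view_count" }, // hypothetical feature ID
 *     );
 *     const finished = await ai.projectsLocationsFeaturestoresEntityTypesFeaturesOperationsWait(
 *       createOp.name!,
 *     );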
*/ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsWait(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single Feature. * * @param name Immutable. Name of the Feature. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. */ async projectsLocationsFeaturestoresEntityTypesFeaturesPatch(name: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Feature; } /** * Gets details of a single EntityType. * * @param name Required. The name of the EntityType resource. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` */ async projectsLocationsFeaturestoresEntityTypesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1EntityType; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresEntityTypesGetIamPolicy(resource: string, opts: ProjectsLocationsFeaturestoresEntityTypesGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Imports Feature values into the Featurestore from a source storage. The * progress of the import is tracked by the returned operation. The imported * features are guaranteed to be visible to subsequent read operations after * the operation is marked as successfully done. If an import operation fails, * the Feature values returned from reads and exports may be inconsistent. 
If * consistency is required, the caller must retry the same import request * again and wait till the new operation returned is marked as successfully * done. There are also scenarios where the caller can cause inconsistency. - * Source data for import contains multiple distinct Feature values for the * same entity ID and timestamp. - Source is modified during an import. This * includes adding, updating, or removing source data and/or metadata. * Examples of updating metadata include but are not limited to changing * storage location, storage class, or retention policy. - Online serving * cluster is under-provisioned. * * @param entityType Required. The resource name of the EntityType grouping the Features for which values are being imported. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}` */ async projectsLocationsFeaturestoresEntityTypesImportFeatureValues(entityType: string, req: GoogleCloudAiplatformV1ImportFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1ImportFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:importFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Lists EntityTypes in a given Featurestore. * * @param parent Required. The resource name of the Featurestore to list EntityTypes. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresEntityTypesList(parent: string, opts: ProjectsLocationsFeaturestoresEntityTypesListOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/entityTypes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListEntityTypesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsFeaturestoresEntityTypesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. 
It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeaturestoresEntityTypesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeaturestoresEntityTypesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeaturestoresEntityTypesOperationsList(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeaturestoresEntityTypesOperationsWait(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single EntityType. * * @param name Immutable. Name of the EntityType. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. 
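 *
 * A hypothetical update sketch (assumes `credentialsClient`, an existing
 * EntityType resource name in `entityTypeName`, and that `updateMask` is
 * given in the form the generated PatchOptions type expects):
 *
 *     const ai = new AIplatform(credentialsClient);
 *     const updated = await ai.projectsLocationsFeaturestoresEntityTypesPatch(
 *       entityTypeName,
 *       { description: "Registered users of the web shop" }, // fields to change
 *       { updateMask: "description" },                       // restrict the patch to those fields
 *     );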
*/ async projectsLocationsFeaturestoresEntityTypesPatch(name: string, req: GoogleCloudAiplatformV1EntityType, opts: ProjectsLocationsFeaturestoresEntityTypesPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1EntityType; } /** * Reads Feature values of a specific entity of an EntityType. For reading * feature values of multiple entities of an EntityType, please use * StreamingReadFeatureValues. * * @param entityType Required. The resource name of the EntityType for the entity being read. Value format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be `user`. */ async projectsLocationsFeaturestoresEntityTypesReadFeatureValues(entityType: string, req: GoogleCloudAiplatformV1ReadFeatureValuesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ entityType }:readFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponse(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresEntityTypesSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Reads Feature values for multiple entities. Depending on their size, data * for different entities may be broken up across multiple responses. * * @param entityType Required. The resource name of the entities' type. Value format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be `user`. */ async projectsLocationsFeaturestoresEntityTypesStreamingReadFeatureValues(entityType: string, req: GoogleCloudAiplatformV1StreamingReadFeatureValuesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ entityType }:streamingReadFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponse(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. 
This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresEntityTypesTestIamPermissions(resource: string, opts: ProjectsLocationsFeaturestoresEntityTypesTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Writes Feature values of one or more entities of an EntityType. The * Feature values are merged into existing entities if any. The Feature values * to be written must have timestamp within the online storage retention. * * @param entityType Required. The resource name of the EntityType for the entities being written. Value format: `projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}`. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be `user`. */ async projectsLocationsFeaturestoresEntityTypesWriteFeatureValues(entityType: string, req: GoogleCloudAiplatformV1WriteFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1WriteFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:writeFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1WriteFeatureValuesResponse; } /** * Gets details of a single Featurestore. * * @param name Required. The name of the Featurestore resource. */ async projectsLocationsFeaturestoresGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Featurestore; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresGetIamPolicy(resource: string, opts: ProjectsLocationsFeaturestoresGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists Featurestores in a given project and location. * * @param parent Required. The resource name of the Location to list Featurestores. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeaturestoresList(parent: string, opts: ProjectsLocationsFeaturestoresListOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/featurestores`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeaturestoresResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsFeaturestoresOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeaturestoresOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeaturestoresOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
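 *
 * Pagination sketch (hypothetical; assumes `credentialsClient` and a parent
 * Featurestore resource name in `featurestoreName`):
 *
 *     const ai = new AIplatform(credentialsClient);
 *     let pageToken: string | undefined;
 *     do {
 *       const page = await ai.projectsLocationsFeaturestoresOperationsList(
 *         featurestoreName,
 *         { pageSize: 100, pageToken },
 *       );
 *       for (const op of page.operations ?? []) console.log(op.name, op.done);
 *       pageToken = page.nextPageToken;
 *     } while (pageToken);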
*/ async projectsLocationsFeaturestoresOperationsList(name: string, opts: ProjectsLocationsFeaturestoresOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeaturestoresOperationsWait(name: string, opts: ProjectsLocationsFeaturestoresOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single Featurestore. * * @param name Output only. Name of the Featurestore. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresPatch(name: string, req: GoogleCloudAiplatformV1Featurestore, opts: ProjectsLocationsFeaturestoresPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Searches Features matching a query in a given project. * * @param location Required. The resource name of the Location to search Features. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeaturestoresSearchFeatures(location: string, opts: ProjectsLocationsFeaturestoresSearchFeaturesOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ location }/featurestores:searchFeatures`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.query !== undefined) { url.searchParams.append("query", String(opts.query)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1SearchFeaturesResponse; } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresTestIamPermissions(resource: string, opts: ProjectsLocationsFeaturestoresTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Gets information about a location. * * @param name Resource name for the location. */ async projectsLocationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudLocationLocation; } /** * Cancels a HyperparameterTuningJob. Starts asynchronous cancellation on the * HyperparameterTuningJob. The server makes a best effort to cancel the job, * but success is not guaranteed. Clients can use * JobService.GetHyperparameterTuningJob or other methods to check whether the * cancellation succeeded or whether the job completed despite cancellation. * On successful cancellation, the HyperparameterTuningJob is not deleted; * instead it becomes a job with a HyperparameterTuningJob.error value with a * google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * HyperparameterTuningJob.state is set to `CANCELLED`. * * @param name Required. The name of the HyperparameterTuningJob to cancel. Format: `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` */ async projectsLocationsHyperparameterTuningJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelHyperparameterTuningJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a HyperparameterTuningJob * * @param parent Required. The resource name of the Location to create the HyperparameterTuningJob in. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsHyperparameterTuningJobsCreate(parent: string, req: GoogleCloudAiplatformV1HyperparameterTuningJob): Promise { req = serializeGoogleCloudAiplatformV1HyperparameterTuningJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/hyperparameterTuningJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(data); } /** * Deletes a HyperparameterTuningJob. * * @param name Required. The name of the HyperparameterTuningJob resource to be deleted. Format: `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` */ async projectsLocationsHyperparameterTuningJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a HyperparameterTuningJob * * @param name Required. The name of the HyperparameterTuningJob resource. Format: `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` */ async projectsLocationsHyperparameterTuningJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(data); } /** * Lists HyperparameterTuningJobs in a Location. * * @param parent Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsHyperparameterTuningJobsList(parent: string, opts: ProjectsLocationsHyperparameterTuningJobsListOptions = {}): Promise { opts = serializeProjectsLocationsHyperparameterTuningJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/hyperparameterTuningJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
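 * @example
 * A hedged sketch: the operation name is a placeholder and `client` is an
 * assumed CredentialsClient. Because cancellation is best effort, the sketch
 * re-reads the operation afterwards, as the description above suggests.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const opName =
 *   "projects/my-project/locations/us-central1/hyperparameterTuningJobs/123/operations/456";
 * await ai.projectsLocationsHyperparameterTuningJobsOperationsCancel(opName);
 * const op = await ai.projectsLocationsHyperparameterTuningJobsOperationsGet(opName);
 * console.log(op.done, op.error);
 * ```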
*/ async projectsLocationsHyperparameterTuningJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsHyperparameterTuningJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsHyperparameterTuningJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsHyperparameterTuningJobsOperationsList(name: string, opts: ProjectsLocationsHyperparameterTuningJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsHyperparameterTuningJobsOperationsWait(name: string, opts: ProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Creates an IndexEndpoint. * * @param parent Required. The resource name of the Location to create the IndexEndpoint in. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexEndpointsCreate(parent: string, req: GoogleCloudAiplatformV1IndexEndpoint): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/indexEndpoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes an IndexEndpoint. * * @param name Required. The name of the IndexEndpoint resource to be deleted. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Deploys an Index into this IndexEndpoint, creating a DeployedIndex within * it. Only non-empty Indexes can be deployed. * * @param indexEndpoint Required. The name of the IndexEndpoint resource into which to deploy an Index. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsDeployIndex(indexEndpoint: string, req: GoogleCloudAiplatformV1DeployIndexRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:deployIndex`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Finds the nearest neighbors of each vector within the request. * * @param indexEndpoint Required. The name of the index endpoint. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsFindNeighbors(indexEndpoint: string, req: GoogleCloudAiplatformV1FindNeighborsRequest): Promise { req = serializeGoogleCloudAiplatformV1FindNeighborsRequest(req); const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:findNeighbors`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1FindNeighborsResponse(data); } /** * Gets an IndexEndpoint. * * @param name Required. The name of the IndexEndpoint resource. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1IndexEndpoint; } /** * Lists IndexEndpoints in a Location. * * @param parent Required. The resource name of the Location from which to list the IndexEndpoints. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexEndpointsList(parent: string, opts: ProjectsLocationsIndexEndpointsListOptions = {}): Promise { opts = serializeProjectsLocationsIndexEndpointsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/indexEndpoints`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListIndexEndpointsResponse; } /** * Update an existing DeployedIndex under an IndexEndpoint. * * @param indexEndpoint Required. The name of the IndexEndpoint resource into which to deploy an Index. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsMutateDeployedIndex(indexEndpoint: string, req: GoogleCloudAiplatformV1DeployedIndex): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:mutateDeployedIndex`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsIndexEndpointsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsIndexEndpointsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsIndexEndpointsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsIndexEndpointsOperationsList(name: string, opts: ProjectsLocationsIndexEndpointsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsIndexEndpointsOperationsWait(name: string, opts: ProjectsLocationsIndexEndpointsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsIndexEndpointsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an IndexEndpoint. * * @param name Output only. The resource name of the IndexEndpoint. */ async projectsLocationsIndexEndpointsPatch(name: string, req: GoogleCloudAiplatformV1IndexEndpoint, opts: ProjectsLocationsIndexEndpointsPatchOptions = {}): Promise { opts = serializeProjectsLocationsIndexEndpointsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1IndexEndpoint; } /** * Reads the datapoints/vectors of the given IDs. A maximum of 1000 * datapoints can be retrieved in a batch. * * @param indexEndpoint Required. The name of the index endpoint. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsReadIndexDatapoints(indexEndpoint: string, req: GoogleCloudAiplatformV1ReadIndexDatapointsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:readIndexDatapoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ReadIndexDatapointsResponse(data); } /** * Undeploys an Index from an IndexEndpoint, removing a DeployedIndex from * it, and freeing all resources it's using. * * @param indexEndpoint Required. The name of the IndexEndpoint resource from which to undeploy an Index. 
Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsUndeployIndex(indexEndpoint: string, req: GoogleCloudAiplatformV1UndeployIndexRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:undeployIndex`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates an Index. * * @param parent Required. The resource name of the Location to create the Index in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexesCreate(parent: string, req: GoogleCloudAiplatformV1Index): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/indexes`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes an Index. An Index can only be deleted when all its * DeployedIndexes had been undeployed. * * @param name Required. The name of the Index resource to be deleted. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets an Index. * * @param name Required. The name of the Index resource. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Index; } /** * Lists Indexes in a Location. * * @param parent Required. The resource name of the Location from which to list the Indexes. Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexesList(parent: string, opts: ProjectsLocationsIndexesListOptions = {}): Promise { opts = serializeProjectsLocationsIndexesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/indexes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListIndexesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
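 * @example
 * A hedged sketch with placeholder names and an assumed client. Per the
 * description above, a successfully cancelled operation ends up with an
 * Operation.error whose google.rpc.Status code is `1` (`CANCELLED`), which
 * the sketch checks after re-reading the operation.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const opName =
 *   "projects/my-project/locations/us-central1/indexes/my-index/operations/789";
 * await ai.projectsLocationsIndexesOperationsCancel(opName);
 * const op = await ai.projectsLocationsIndexesOperationsGet(opName);
 * if (op.done && op.error?.code === 1) {
 *   console.log("operation was cancelled");
 * }
 * ```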
*/ async projectsLocationsIndexesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsIndexesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsIndexesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsIndexesOperationsList(name: string, opts: ProjectsLocationsIndexesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsIndexesOperationsWait(name: string, opts: ProjectsLocationsIndexesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsIndexesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an Index. * * @param name Output only. The resource name of the Index. 
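 * @example
 * A hedged sketch, assuming `displayName` is a mutable field of
 * GoogleCloudAiplatformV1Index; the resource name and client are
 * placeholders. The call returns a long-running operation that completes
 * once the update is applied.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const op = await ai.projectsLocationsIndexesPatch(
 *   "projects/my-project/locations/us-central1/indexes/my-index",
 *   { displayName: "renamed index" },
 *   { updateMask: "displayName" },
 * );
 * console.log(op.name);
 * ```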
*/ async projectsLocationsIndexesPatch(name: string, req: GoogleCloudAiplatformV1Index, opts: ProjectsLocationsIndexesPatchOptions = {}): Promise { opts = serializeProjectsLocationsIndexesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Remove Datapoints from an Index. * * @param index Required. The name of the Index resource to be updated. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesRemoveDatapoints(index: string, req: GoogleCloudAiplatformV1RemoveDatapointsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ index }:removeDatapoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1RemoveDatapointsResponse; } /** * Add/update Datapoints into an Index. * * @param index Required. The name of the Index resource to be updated. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesUpsertDatapoints(index: string, req: GoogleCloudAiplatformV1UpsertDatapointsRequest): Promise { req = serializeGoogleCloudAiplatformV1UpsertDatapointsRequest(req); const url = new URL(`${this.#baseUrl}v1/${ index }:upsertDatapoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1UpsertDatapointsResponse; } /** * Lists information about the supported locations for this service. * * @param name The resource that owns the locations collection, if applicable. */ async projectsLocationsList(name: string, opts: ProjectsLocationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/locations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudLocationListLocationsResponse; } /** * Creates an Artifact associated with a MetadataStore. * * @param parent Required. The resource name of the MetadataStore where the Artifact should be created. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresArtifactsCreate(parent: string, req: GoogleCloudAiplatformV1Artifact, opts: ProjectsLocationsMetadataStoresArtifactsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/artifacts`); if (opts.artifactId !== undefined) { url.searchParams.append("artifactId", String(opts.artifactId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Artifact; } /** * Deletes an Artifact. * * @param name Required. The resource name of the Artifact to delete. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` */ async projectsLocationsMetadataStoresArtifactsDelete(name: string, opts: ProjectsLocationsMetadataStoresArtifactsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.etag !== undefined) { url.searchParams.append("etag", String(opts.etag)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Retrieves a specific Artifact. * * @param name Required. The resource name of the Artifact to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` */ async projectsLocationsMetadataStoresArtifactsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Artifact; } /** * Lists Artifacts in the MetadataStore. * * @param parent Required. The MetadataStore whose Artifacts should be listed. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresArtifactsList(parent: string, opts: ProjectsLocationsMetadataStoresArtifactsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/artifacts`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListArtifactsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMetadataStoresArtifactsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresArtifactsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. 
Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresArtifactsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsMetadataStoresArtifactsOperationsList(name: string, opts: ProjectsLocationsMetadataStoresArtifactsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresArtifactsOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a stored Artifact. * * @param name Output only. The resource name of the Artifact. */ async projectsLocationsMetadataStoresArtifactsPatch(name: string, req: GoogleCloudAiplatformV1Artifact, opts: ProjectsLocationsMetadataStoresArtifactsPatchOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresArtifactsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.allowMissing !== undefined) { url.searchParams.append("allowMissing", String(opts.allowMissing)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Artifact; } /** * Purges Artifacts. * * @param parent Required. The metadata store to purge Artifacts from. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresArtifactsPurge(parent: string, req: GoogleCloudAiplatformV1PurgeArtifactsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/artifacts:purge`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Retrieves lineage of an Artifact represented through Artifacts and * Executions connected by Event edges and returned as a LineageSubgraph. * * @param artifact Required. The resource name of the Artifact whose Lineage needs to be retrieved as a LineageSubgraph. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the number of Events that would be returned for the Context exceeds 1000. */ async projectsLocationsMetadataStoresArtifactsQueryArtifactLineageSubgraph(artifact: string, opts: ProjectsLocationsMetadataStoresArtifactsQueryArtifactLineageSubgraphOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ artifact }:queryArtifactLineageSubgraph`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.maxHops !== undefined) { url.searchParams.append("maxHops", String(opts.maxHops)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1LineageSubgraph; } /** * Adds a set of Artifacts and Executions to a Context. If any of the * Artifacts or Executions have already been added to a Context, they are * simply skipped. * * @param context Required. The resource name of the Context that the Artifacts and Executions belong to. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsAddContextArtifactsAndExecutions(context: string, req: GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:addContextArtifactsAndExecutions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse; } /** * Adds a set of Contexts as children to a parent Context. If any of the * child Contexts have already been added to the parent Context, they are * simply skipped. If this call would create a cycle or cause any Context to * have more than 10 parents, the request will fail with an INVALID_ARGUMENT * error. * * @param context Required. The resource name of the parent Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsAddContextChildren(context: string, req: GoogleCloudAiplatformV1AddContextChildrenRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:addContextChildren`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1AddContextChildrenResponse; } /** * Creates a Context associated with a MetadataStore. * * @param parent Required. The resource name of the MetadataStore where the Context should be created. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresContextsCreate(parent: string, req: GoogleCloudAiplatformV1Context, opts: ProjectsLocationsMetadataStoresContextsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/contexts`); if (opts.contextId !== undefined) { url.searchParams.append("contextId", String(opts.contextId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Context; } /** * Deletes a stored Context. * * @param name Required. The resource name of the Context to delete. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsDelete(name: string, opts: ProjectsLocationsMetadataStoresContextsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.etag !== undefined) { url.searchParams.append("etag", String(opts.etag)); } if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Retrieves a specific Context. * * @param name Required. The resource name of the Context to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Context; } /** * Lists Contexts on the MetadataStore. * * @param parent Required. The MetadataStore whose Contexts should be listed. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresContextsList(parent: string, opts: ProjectsLocationsMetadataStoresContextsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/contexts`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListContextsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
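 * @example
 * A hedged sketch with placeholder names and an assumed client. Cancellation
 * is best effort; once the operation record is no longer of interest it can
 * also be removed with the corresponding OperationsDelete method.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const opName =
 *   "projects/my-project/locations/us-central1/metadataStores/default/contexts/my-context/operations/42";
 * await ai.projectsLocationsMetadataStoresContextsOperationsCancel(opName);
 * // Later, if the operation result is no longer needed:
 * await ai.projectsLocationsMetadataStoresContextsOperationsDelete(opName);
 * ```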
*/ async projectsLocationsMetadataStoresContextsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresContextsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresContextsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsMetadataStoresContextsOperationsList(name: string, opts: ProjectsLocationsMetadataStoresContextsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresContextsOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresContextsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresContextsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a stored Context. * * @param name Immutable. The resource name of the Context. 
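 * @example
 * A hedged sketch, assuming `description` is a mutable field of
 * GoogleCloudAiplatformV1Context; names are placeholders and `client` is an
 * assumed CredentialsClient. `updateMask` and `allowMissing` are the options
 * this method forwards as query parameters.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const ctx = await ai.projectsLocationsMetadataStoresContextsPatch(
 *   "projects/my-project/locations/us-central1/metadataStores/default/contexts/my-context",
 *   { description: "updated description" },
 *   { updateMask: "description", allowMissing: true },
 * );
 * console.log(ctx);
 * ```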
*/ async projectsLocationsMetadataStoresContextsPatch(name: string, req: GoogleCloudAiplatformV1Context, opts: ProjectsLocationsMetadataStoresContextsPatchOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresContextsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.allowMissing !== undefined) { url.searchParams.append("allowMissing", String(opts.allowMissing)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Context; } /** * Purges Contexts. * * @param parent Required. The metadata store to purge Contexts from. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresContextsPurge(parent: string, req: GoogleCloudAiplatformV1PurgeContextsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/contexts:purge`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Retrieves Artifacts and Executions within the specified Context, connected * by Event edges and returned as a LineageSubgraph. * * @param context Required. The resource name of the Context whose Artifacts and Executions should be retrieved as a LineageSubgraph. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the number of Events that would be returned for the Context exceeds 1000. */ async projectsLocationsMetadataStoresContextsQueryContextLineageSubgraph(context: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:queryContextLineageSubgraph`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1LineageSubgraph; } /** * Remove a set of children contexts from a parent Context. If any of the * child Contexts were NOT added to the parent Context, they are simply * skipped. * * @param context Required. The resource name of the parent Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsRemoveContextChildren(context: string, req: GoogleCloudAiplatformV1RemoveContextChildrenRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:removeContextChildren`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1RemoveContextChildrenResponse; } /** * Initializes a MetadataStore, including allocation of resources. * * @param parent Required. The resource name of the Location where the MetadataStore should be created. 
Format: `projects/{project}/locations/{location}/` */ async projectsLocationsMetadataStoresCreate(parent: string, req: GoogleCloudAiplatformV1MetadataStore, opts: ProjectsLocationsMetadataStoresCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataStores`); if (opts.metadataStoreId !== undefined) { url.searchParams.append("metadataStoreId", String(opts.metadataStoreId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single MetadataStore and all its child resources (Artifacts, * Executions, and Contexts). * * @param name Required. The resource name of the MetadataStore to delete. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresDelete(name: string, opts: ProjectsLocationsMetadataStoresDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Adds Events to the specified Execution. An Event indicates whether an * Artifact was used as an input or output for an Execution. If an Event * already exists between the Execution and the Artifact, the Event is * skipped. * * @param execution Required. The resource name of the Execution that the Events connect Artifacts with. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsAddExecutionEvents(execution: string, req: GoogleCloudAiplatformV1AddExecutionEventsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ execution }:addExecutionEvents`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1AddExecutionEventsResponse; } /** * Creates an Execution associated with a MetadataStore. * * @param parent Required. The resource name of the MetadataStore where the Execution should be created. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresExecutionsCreate(parent: string, req: GoogleCloudAiplatformV1Execution, opts: ProjectsLocationsMetadataStoresExecutionsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/executions`); if (opts.executionId !== undefined) { url.searchParams.append("executionId", String(opts.executionId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Execution; } /** * Deletes an Execution. * * @param name Required. The resource name of the Execution to delete. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsDelete(name: string, opts: ProjectsLocationsMetadataStoresExecutionsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.etag !== undefined) { url.searchParams.append("etag", String(opts.etag)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Retrieves a specific Execution. 
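 * @example
 * A minimal hedged sketch with a placeholder resource name and an assumed
 * CredentialsClient.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const execution = await ai.projectsLocationsMetadataStoresExecutionsGet(
 *   "projects/my-project/locations/us-central1/metadataStores/default/executions/my-execution",
 * );
 * console.log(execution);
 * ```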
* * @param name Required. The resource name of the Execution to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Execution; } /** * Lists Executions in the MetadataStore. * * @param parent Required. The MetadataStore whose Executions should be listed. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresExecutionsList(parent: string, opts: ProjectsLocationsMetadataStoresExecutionsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/executions`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListExecutionsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMetadataStoresExecutionsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresExecutionsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresExecutionsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. 
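 * @example
 * A hedged pagination sketch: the parent name is a placeholder, `client` is an
 * assumed CredentialsClient, and the loop relies on the standard
 * `operations` / `nextPageToken` fields of the list response.
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const parent =
 *   "projects/my-project/locations/us-central1/metadataStores/default/executions/my-execution";
 * let pageToken: string | undefined;
 * do {
 *   const page = await ai.projectsLocationsMetadataStoresExecutionsOperationsList(
 *     parent,
 *     { pageSize: 100, pageToken },
 *   );
 *   for (const op of page.operations ?? []) console.log(op.name);
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```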
* * @param name The name of the operation's parent resource. */ async projectsLocationsMetadataStoresExecutionsOperationsList(name: string, opts: ProjectsLocationsMetadataStoresExecutionsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresExecutionsOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a stored Execution. * * @param name Output only. The resource name of the Execution. */ async projectsLocationsMetadataStoresExecutionsPatch(name: string, req: GoogleCloudAiplatformV1Execution, opts: ProjectsLocationsMetadataStoresExecutionsPatchOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresExecutionsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.allowMissing !== undefined) { url.searchParams.append("allowMissing", String(opts.allowMissing)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Execution; } /** * Purges Executions. * * @param parent Required. The metadata store to purge Executions from. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresExecutionsPurge(parent: string, req: GoogleCloudAiplatformV1PurgeExecutionsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/executions:purge`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Obtains the set of input and output Artifacts for this Execution, in the * form of LineageSubgraph that also contains the Execution and connecting * Events. * * @param execution Required. 
The resource name of the Execution whose input and output Artifacts should be retrieved as a LineageSubgraph. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsQueryExecutionInputsAndOutputs(execution: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ execution }:queryExecutionInputsAndOutputs`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1LineageSubgraph; } /** * Retrieves a specific MetadataStore. * * @param name Required. The resource name of the MetadataStore to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1MetadataStore; } /** * Lists MetadataStores for a Location. * * @param parent Required. The Location whose MetadataStores should be listed. Format: `projects/{project}/locations/{location}` */ async projectsLocationsMetadataStoresList(parent: string, opts: ProjectsLocationsMetadataStoresListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataStores`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListMetadataStoresResponse; } /** * Creates a MetadataSchema. * * @param parent Required. The resource name of the MetadataStore where the MetadataSchema should be created. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresMetadataSchemasCreate(parent: string, req: GoogleCloudAiplatformV1MetadataSchema, opts: ProjectsLocationsMetadataStoresMetadataSchemasCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataSchemas`); if (opts.metadataSchemaId !== undefined) { url.searchParams.append("metadataSchemaId", String(opts.metadataSchemaId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1MetadataSchema; } /** * Retrieves a specific MetadataSchema. * * @param name Required. The resource name of the MetadataSchema to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}` */ async projectsLocationsMetadataStoresMetadataSchemasGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1MetadataSchema; } /** * Lists MetadataSchemas. * * @param parent Required. The MetadataStore whose MetadataSchemas should be listed. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresMetadataSchemasList(parent: string, opts: ProjectsLocationsMetadataStoresMetadataSchemasListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataSchemas`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListMetadataSchemasResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMetadataStoresOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
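 *
 * @example A minimal sketch, assuming `ai` is an authenticated AIplatform instance and the
 * MetadataStore name is a placeholder:
 *
 * ```ts
 * // List the first page of operations under a MetadataStore.
 * const ops = await ai.projectsLocationsMetadataStoresOperationsList(
 *   "projects/my-project/locations/us-central1/metadataStores/default",
 *   { pageSize: 10 },
 * );
 * console.log(ops.operations?.length ?? 0, "operations");
 * ```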
*/ async projectsLocationsMetadataStoresOperationsList(name: string, opts: ProjectsLocationsMetadataStoresOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Batch migrates resources from ml.googleapis.com, automl.googleapis.com, * and datalabeling.googleapis.com to Vertex AI. * * @param parent Required. The location of the migrated resource will live in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsMigratableResourcesBatchMigrate(parent: string, req: GoogleCloudAiplatformV1BatchMigrateResourcesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/migratableResources:batchMigrate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMigratableResourcesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. 
It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMigratableResourcesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMigratableResourcesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsMigratableResourcesOperationsList(name: string, opts: ProjectsLocationsMigratableResourcesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMigratableResourcesOperationsWait(name: string, opts: ProjectsLocationsMigratableResourcesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMigratableResourcesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Searches all of the resources in automl.googleapis.com, * datalabeling.googleapis.com and ml.googleapis.com that can be migrated to * Vertex AI's given location. * * @param parent Required. The location that the migratable resources should be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsMigratableResourcesSearch(parent: string, req: GoogleCloudAiplatformV1SearchMigratableResourcesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/migratableResources:search`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1SearchMigratableResourcesResponse; } /** * Creates a ModelDeploymentMonitoringJob. It will run periodically on a * configured interval. * * @param parent Required. The parent of the ModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelDeploymentMonitoringJobsCreate(parent: string, req: GoogleCloudAiplatformV1ModelDeploymentMonitoringJob): Promise { req = serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/modelDeploymentMonitoringJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data); } /** * Deletes a ModelDeploymentMonitoringJob. * * @param name Required. The resource name of the model monitoring job to delete. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a ModelDeploymentMonitoringJob. * * @param name Required. The resource name of the ModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data); } /** * Lists ModelDeploymentMonitoringJobs in a Location. * * @param parent Required. The parent of the ModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelDeploymentMonitoringJobsList(parent: string, opts: ProjectsLocationsModelDeploymentMonitoringJobsListOptions = {}): Promise { opts = serializeProjectsLocationsModelDeploymentMonitoringJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/modelDeploymentMonitoringJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. 
Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsList(name: string, opts: ProjectsLocationsModelDeploymentMonitoringJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
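 *
 * @example A best-effort wait sketch, assuming `ai` is an authenticated AIplatform instance
 * and the operation name is a placeholder:
 *
 * ```ts
 * // Ask the server to wait for the operation, then fall back to polling if it is still running.
 * const op = await ai.projectsLocationsModelDeploymentMonitoringJobsOperationsWait(
 *   "projects/my-project/locations/us-central1/modelDeploymentMonitoringJobs/123/operations/456",
 * );
 * if (!op.done) {
 *   console.log("still running, keep polling OperationsGet:", op.name);
 * }
 * ```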
*/ async projectsLocationsModelDeploymentMonitoringJobsOperationsWait(name: string, opts: ProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a ModelDeploymentMonitoringJob. * * @param name Output only. Resource name of a ModelDeploymentMonitoringJob. */ async projectsLocationsModelDeploymentMonitoringJobsPatch(name: string, req: GoogleCloudAiplatformV1ModelDeploymentMonitoringJob, opts: ProjectsLocationsModelDeploymentMonitoringJobsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(req); opts = serializeProjectsLocationsModelDeploymentMonitoringJobsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Pauses a ModelDeploymentMonitoringJob. If the job is running, the server * makes a best effort to cancel the job. Will mark * ModelDeploymentMonitoringJob.state to 'PAUSED'. * * @param name Required. The resource name of the ModelDeploymentMonitoringJob to pause. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsPause(name: string, req: GoogleCloudAiplatformV1PauseModelDeploymentMonitoringJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:pause`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Resumes a paused ModelDeploymentMonitoringJob. It will start to run from * next scheduled time. A deleted ModelDeploymentMonitoringJob can't be * resumed. * * @param name Required. The resource name of the ModelDeploymentMonitoringJob to resume. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsResume(name: string, req: GoogleCloudAiplatformV1ResumeModelDeploymentMonitoringJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:resume`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Searches Model Monitoring Statistics generated within a given time window. * * @param modelDeploymentMonitoringJob Required. ModelDeploymentMonitoring Job resource name. 
Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsSearchModelDeploymentMonitoringStatsAnomalies(modelDeploymentMonitoringJob: string, req: GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest): Promise { req = serializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ modelDeploymentMonitoringJob }:searchModelDeploymentMonitoringStatsAnomalies`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse(data); } /** * Copies an already existing Vertex AI Model into the specified Location. * The source Model must exist in the same Project. When copying custom * Models, the users themselves are responsible for Model.metadata content to * be region-agnostic, as well as making sure that any resources (e.g. files) * it depends on remain accessible. * * @param parent Required. The resource name of the Location into which to copy the Model. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelsCopy(parent: string, req: GoogleCloudAiplatformV1CopyModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/models:copy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a Model. A model cannot be deleted if any Endpoint resource has a * DeployedModel based on the model in its deployed_models field. * * @param name Required. The name of the Model resource to be deleted. Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Deletes a Model version. Model version can only be deleted if there are no * DeployedModels created from it. Deleting the only version in the Model is * not allowed. Use DeleteModel for deleting the Model instead. * * @param name Required. The name of the model version to be deleted, with a version ID explicitly included. Example: `projects/{project}/locations/{location}/models/{model}@1234` */ async projectsLocationsModelsDeleteVersion(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:deleteVersion`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a ModelEvaluation. * * @param name Required. The name of the ModelEvaluation resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}` */ async projectsLocationsModelsEvaluationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ModelEvaluation; } /** * Imports an externally generated ModelEvaluation. * * @param parent Required. The name of the parent model resource. 
Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsEvaluationsImport(parent: string, req: GoogleCloudAiplatformV1ImportModelEvaluationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/evaluations:import`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1ModelEvaluation; } /** * Lists ModelEvaluations in a Model. * * @param parent Required. The resource name of the Model to list the ModelEvaluations from. Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsEvaluationsList(parent: string, opts: ProjectsLocationsModelsEvaluationsListOptions = {}): Promise { opts = serializeProjectsLocationsModelsEvaluationsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/evaluations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListModelEvaluationsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsModelsEvaluationsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsModelsEvaluationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsModelsEvaluationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsModelsEvaluationsOperationsList(name: string, opts: ProjectsLocationsModelsEvaluationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsModelsEvaluationsOperationsWait(name: string, opts: ProjectsLocationsModelsEvaluationsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsModelsEvaluationsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Imports a list of externally generated EvaluatedAnnotations. * * @param parent Required. The name of the parent ModelEvaluationSlice resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}` */ async projectsLocationsModelsEvaluationsSlicesBatchImport(parent: string, req: GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }:batchImport`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse; } /** * Gets a ModelEvaluationSlice. * * @param name Required. The name of the ModelEvaluationSlice resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}` */ async projectsLocationsModelsEvaluationsSlicesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ModelEvaluationSlice; } /** * Lists ModelEvaluationSlices in a ModelEvaluation. * * @param parent Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. 
Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}` */ async projectsLocationsModelsEvaluationsSlicesList(parent: string, opts: ProjectsLocationsModelsEvaluationsSlicesListOptions = {}): Promise { opts = serializeProjectsLocationsModelsEvaluationsSlicesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/slices`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListModelEvaluationSlicesResponse; } /** * Exports a trained, exportable Model to a location specified by the user. A * Model is considered to be exportable if it has at least one supported * export format. * * @param name Required. The resource name of the Model to export. The resource name may contain version id or version alias to specify the version, if no version is specified, the default version will be exported. */ async projectsLocationsModelsExport(name: string, req: GoogleCloudAiplatformV1ExportModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:export`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Gets a Model. * * @param name Required. The name of the Model resource. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. */ async projectsLocationsModelsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Model(data); } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsModelsGetIamPolicy(resource: string, opts: ProjectsLocationsModelsGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists Models in a Location. * * @param parent Required. The resource name of the Location to list the Models from. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelsList(parent: string, opts: ProjectsLocationsModelsListOptions = {}): Promise { opts = serializeProjectsLocationsModelsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/models`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelsResponse(data); } /** * Lists versions of the specified model. * * @param name Required. The name of the model to list versions for. */ async projectsLocationsModelsListVersions(name: string, opts: ProjectsLocationsModelsListVersionsOptions = {}): Promise { opts = serializeProjectsLocationsModelsListVersionsOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:listVersions`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelVersionsResponse(data); } /** * Merges a set of aliases for a Model version. * * @param name Required. The name of the model version to merge aliases, with a version ID explicitly included. Example: `projects/{project}/locations/{location}/models/{model}@1234` */ async projectsLocationsModelsMergeVersionAliases(name: string, req: GoogleCloudAiplatformV1MergeVersionAliasesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:mergeVersionAliases`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Model(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsModelsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. 
This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsModelsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsModelsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsModelsOperationsList(name: string, opts: ProjectsLocationsModelsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsModelsOperationsWait(name: string, opts: ProjectsLocationsModelsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsModelsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a Model. * * @param name The resource name of the Model. 
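 *
 * @example A field-mask update sketch, assuming `ai` is an authenticated AIplatform instance
 * and the model resource name and new display name are placeholders:
 *
 * ```ts
 * // Update only the display name; updateMask limits which fields are written.
 * const updated = await ai.projectsLocationsModelsPatch(
 *   "projects/my-project/locations/us-central1/models/1234567890",
 *   { displayName: "my-model-v2" },
 *   { updateMask: "displayName" },
 * );
 * console.log(updated);
 * ```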
*/ async projectsLocationsModelsPatch(name: string, req: GoogleCloudAiplatformV1Model, opts: ProjectsLocationsModelsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Model(req); opts = serializeProjectsLocationsModelsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1Model(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsModelsSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsModelsTestIamPermissions(resource: string, opts: ProjectsLocationsModelsTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Incrementally update the dataset used for an examples model. * * @param model Required. The resource name of the Model to update. Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsUpdateExplanationDataset(model: string, req: GoogleCloudAiplatformV1UpdateExplanationDatasetRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ model }:updateExplanationDataset`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Uploads a Model artifact into Vertex AI. * * @param parent Required. The resource name of the Location into which to upload the Model. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelsUpload(parent: string, req: GoogleCloudAiplatformV1UploadModelRequest): Promise { req = serializeGoogleCloudAiplatformV1UploadModelRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/models:upload`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Cancels a NasJob. Starts asynchronous cancellation on the NasJob. The * server makes a best effort to cancel the job, but success is not * guaranteed. Clients can use JobService.GetNasJob or other methods to check * whether the cancellation succeeded or whether the job completed despite * cancellation. On successful cancellation, the NasJob is not deleted; * instead it becomes a job with a NasJob.error value with a * google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * NasJob.state is set to `CANCELLED`. * * @param name Required. The name of the NasJob to cancel. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelNasJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a NasJob * * @param parent Required. The resource name of the Location to create the NasJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNasJobsCreate(parent: string, req: GoogleCloudAiplatformV1NasJob): Promise { req = serializeGoogleCloudAiplatformV1NasJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/nasJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1NasJob(data); } /** * Deletes a NasJob. * * @param name Required. The name of the NasJob resource to be deleted. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NasJob * * @param name Required. The name of the NasJob resource. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1NasJob(data); } /** * Lists NasJobs in a Location. * * @param parent Required. The resource name of the Location to list the NasJobs from. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsNasJobsList(parent: string, opts: ProjectsLocationsNasJobsListOptions = {}): Promise { opts = serializeProjectsLocationsNasJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/nasJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListNasJobsResponse(data); } /** * Gets a NasTrialDetail. * * @param name Required. The name of the NasTrialDetail resource. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}/nasTrialDetails/{nas_trial_detail}` */ async projectsLocationsNasJobsNasTrialDetailsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1NasTrialDetail; } /** * List top NasTrialDetails of a NasJob. * * @param parent Required. The name of the NasJob resource. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsNasTrialDetailsList(parent: string, opts: ProjectsLocationsNasJobsNasTrialDetailsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/nasTrialDetails`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListNasTrialDetailsResponse; } /** * Creates a NotebookExecutionJob. * * @param parent Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookExecutionJobsCreate(parent: string, req: GoogleCloudAiplatformV1NotebookExecutionJob, opts: ProjectsLocationsNotebookExecutionJobsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1NotebookExecutionJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookExecutionJobs`); if (opts.notebookExecutionJobId !== undefined) { url.searchParams.append("notebookExecutionJobId", String(opts.notebookExecutionJobId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a NotebookExecutionJob. * * @param name Required. The name of the NotebookExecutionJob resource to be deleted. */ async projectsLocationsNotebookExecutionJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NotebookExecutionJob. * * @param name Required. The name of the NotebookExecutionJob resource. 
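 *
 * @example A minimal sketch, assuming `ai` is an authenticated AIplatform instance and the
 * job resource name is a placeholder:
 *
 * ```ts
 * // Fetch a NotebookExecutionJob by its full resource name.
 * const job = await ai.projectsLocationsNotebookExecutionJobsGet(
 *   "projects/my-project/locations/us-central1/notebookExecutionJobs/my-job",
 * );
 * console.log(job);
 * ```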
*/ async projectsLocationsNotebookExecutionJobsGet(name: string, opts: ProjectsLocationsNotebookExecutionJobsGetOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.view !== undefined) { url.searchParams.append("view", String(opts.view)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1NotebookExecutionJob(data); } /** * Lists NotebookExecutionJobs in a Location. * * @param parent Required. The resource name of the Location from which to list the NotebookExecutionJobs. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookExecutionJobsList(parent: string, opts: ProjectsLocationsNotebookExecutionJobsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookExecutionJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.view !== undefined) { url.searchParams.append("view", String(opts.view)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListNotebookExecutionJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsNotebookExecutionJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsNotebookExecutionJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. 
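 *
 * @example A polling sketch, assuming `ai` is an authenticated AIplatform instance and the
 * operation name is a placeholder (a fixed 5-second interval is used for simplicity):
 *
 * ```ts
 * // Poll the operation until it reports done, then inspect the error or response.
 * let op = await ai.projectsLocationsNotebookExecutionJobsOperationsGet(
 *   "projects/my-project/locations/us-central1/notebookExecutionJobs/my-job/operations/123",
 * );
 * while (!op.done) {
 *   await new Promise((r) => setTimeout(r, 5_000));
 *   op = await ai.projectsLocationsNotebookExecutionJobsOperationsGet(op.name!);
 * }
 * console.log(op.error ?? op.response);
 * ```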
*/ async projectsLocationsNotebookExecutionJobsOperationsGet(name: string): Promise<GoogleLongrunningOperation> { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsNotebookExecutionJobsOperationsList(name: string, opts: ProjectsLocationsNotebookExecutionJobsOperationsListOptions = {}): Promise<GoogleLongrunningListOperationsResponse> { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsNotebookExecutionJobsOperationsWait(name: string, opts: ProjectsLocationsNotebookExecutionJobsOperationsWaitOptions = {}): Promise<GoogleLongrunningOperation> { opts = serializeProjectsLocationsNotebookExecutionJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Assigns a NotebookRuntime to a user for a particular Notebook file. This * method will either return an existing assignment or generate a new one. * * @param parent Required. The resource name of the Location to get the NotebookRuntime assignment. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimesAssign(parent: string, req: GoogleCloudAiplatformV1AssignNotebookRuntimeRequest): Promise<GoogleLongrunningOperation> { const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimes:assign`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be deleted. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner.
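 *
 * A minimal deletion sketch (the runtime name is a placeholder; it assumes
 * the returned long-running operation can be polled through the
 * `projectsLocationsNotebookRuntimesOperationsGet` method defined later in
 * this class):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * let op = await ai.projectsLocationsNotebookRuntimesDelete(
 *   "projects/my-project/locations/us-central1/notebookRuntimes/my-runtime",
 * );
 * // Poll until the deletion operation reports completion.
 * while (!op.done) {
 *   await new Promise((resolve) => setTimeout(resolve, 5_000));
 *   op = await ai.projectsLocationsNotebookRuntimesOperationsGet(op.name!);
 * }
 * ```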
*/ async projectsLocationsNotebookRuntimesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. */ async projectsLocationsNotebookRuntimesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1NotebookRuntime; } /** * Lists NotebookRuntimes in a Location. * * @param parent Required. The resource name of the Location from which to list the NotebookRuntimes. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimesList(parent: string, opts: ProjectsLocationsNotebookRuntimesListOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListNotebookRuntimesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsNotebookRuntimesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsNotebookRuntimesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. 
Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsNotebookRuntimesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsNotebookRuntimesOperationsList(name: string, opts: ProjectsLocationsNotebookRuntimesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsNotebookRuntimesOperationsWait(name: string, opts: ProjectsLocationsNotebookRuntimesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Starts a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be started. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. */ async projectsLocationsNotebookRuntimesStart(name: string, req: GoogleCloudAiplatformV1StartNotebookRuntimeRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:start`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Stops a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be stopped. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. 
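 *
 * A minimal usage sketch (the runtime name is a placeholder; the request is
 * passed as an empty object on the assumption that
 * GoogleCloudAiplatformV1StopNotebookRuntimeRequest declares no required
 * fields):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * const op = await ai.projectsLocationsNotebookRuntimesStop(
 *   "projects/my-project/locations/us-central1/notebookRuntimes/my-runtime",
 *   {}, // assumed-empty request message
 * );
 * console.log(op.name); // long-running operation tracking the stop request
 * ```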
*/ async projectsLocationsNotebookRuntimesStop(name: string, req: GoogleCloudAiplatformV1StopNotebookRuntimeRequest): Promise<GoogleLongrunningOperation> { const url = new URL(`${this.#baseUrl}v1/${ name }:stop`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Upgrades a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be upgraded. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. */ async projectsLocationsNotebookRuntimesUpgrade(name: string, req: GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest): Promise<GoogleLongrunningOperation> { const url = new URL(`${this.#baseUrl}v1/${ name }:upgrade`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a NotebookRuntimeTemplate. * * @param parent Required. The resource name of the Location to create the NotebookRuntimeTemplate. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimeTemplatesCreate(parent: string, req: GoogleCloudAiplatformV1NotebookRuntimeTemplate, opts: ProjectsLocationsNotebookRuntimeTemplatesCreateOptions = {}): Promise<GoogleLongrunningOperation> { req = serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimeTemplates`); if (opts.notebookRuntimeTemplateId !== undefined) { url.searchParams.append("notebookRuntimeTemplateId", String(opts.notebookRuntimeTemplateId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a NotebookRuntimeTemplate. * * @param name Required. The name of the NotebookRuntimeTemplate resource to be deleted. Format: `projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}` */ async projectsLocationsNotebookRuntimeTemplatesDelete(name: string): Promise<GoogleLongrunningOperation> { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NotebookRuntimeTemplate. * * @param name Required. The name of the NotebookRuntimeTemplate resource. Format: `projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}` */ async projectsLocationsNotebookRuntimeTemplatesGet(name: string): Promise<GoogleCloudAiplatformV1NotebookRuntimeTemplate> { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data); } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.
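 *
 * A minimal usage sketch (the template name is a placeholder; policy version
 * 3 is requested so that conditional role bindings, if any, are returned
 * intact):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * const policy = await ai.projectsLocationsNotebookRuntimeTemplatesGetIamPolicy(
 *   "projects/my-project/locations/us-central1/notebookRuntimeTemplates/my-template",
 *   { "options.requestedPolicyVersion": 3 },
 * );
 * console.log(policy.bindings);
 * ```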
*/ async projectsLocationsNotebookRuntimeTemplatesGetIamPolicy(resource: string, opts: ProjectsLocationsNotebookRuntimeTemplatesGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists NotebookRuntimeTemplates in a Location. * * @param parent Required. The resource name of the Location from which to list the NotebookRuntimeTemplates. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimeTemplatesList(parent: string, opts: ProjectsLocationsNotebookRuntimeTemplatesListOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimeTemplatesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimeTemplates`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsNotebookRuntimeTemplatesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsNotebookRuntimeTemplatesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. 
*/ async projectsLocationsNotebookRuntimeTemplatesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsNotebookRuntimeTemplatesOperationsList(name: string, opts: ProjectsLocationsNotebookRuntimeTemplatesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsNotebookRuntimeTemplatesOperationsWait(name: string, opts: ProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a NotebookRuntimeTemplate. * * @param name The resource name of the NotebookRuntimeTemplate. */ async projectsLocationsNotebookRuntimeTemplatesPatch(name: string, req: GoogleCloudAiplatformV1NotebookRuntimeTemplate, opts: ProjectsLocationsNotebookRuntimeTemplatesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(req); opts = serializeProjectsLocationsNotebookRuntimeTemplatesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. 
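 *
 * A minimal usage sketch (the resource, member and role values are
 * placeholders; the request shape assumes the standard
 * GoogleIamV1SetIamPolicyRequest with a `policy.bindings` list):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * const policy = await ai.projectsLocationsNotebookRuntimeTemplatesSetIamPolicy(
 *   "projects/my-project/locations/us-central1/notebookRuntimeTemplates/my-template",
 *   {
 *     policy: {
 *       bindings: [
 *         // Placeholder role and member; substitute real values.
 *         { role: "roles/viewer", members: ["user:alice@example.com"] },
 *       ],
 *     },
 *   },
 * );
 * console.log(policy.etag);
 * ```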
*/ async projectsLocationsNotebookRuntimeTemplatesSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsNotebookRuntimeTemplatesTestIamPermissions(resource: string, opts: ProjectsLocationsNotebookRuntimeTemplatesTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsOperationsList(name: string, opts: ProjectsLocationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsOperationsWait(name: string, opts: ProjectsLocationsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Creates a PersistentResource. * * @param parent Required. The resource name of the Location to create the PersistentResource in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPersistentResourcesCreate(parent: string, req: GoogleCloudAiplatformV1PersistentResource, opts: ProjectsLocationsPersistentResourcesCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1PersistentResource(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/persistentResources`); if (opts.persistentResourceId !== undefined) { url.searchParams.append("persistentResourceId", String(opts.persistentResourceId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a PersistentResource. * * @param name Required. The name of the PersistentResource to be deleted. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` */ async projectsLocationsPersistentResourcesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a PersistentResource. * * @param name Required. The name of the PersistentResource resource. 
Format: `projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}` */ async projectsLocationsPersistentResourcesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1PersistentResource(data); } /** * Lists PersistentResources in a Location. * * @param parent Required. The resource name of the Location to list the PersistentResources from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPersistentResourcesList(parent: string, opts: ProjectsLocationsPersistentResourcesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/persistentResources`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListPersistentResourcesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsPersistentResourcesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsPersistentResourcesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsPersistentResourcesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
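 *
 * A minimal pagination sketch (the parent resource name is a placeholder; it
 * relies on the standard `operations` / `nextPageToken` fields of the list
 * response):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * const parent =
 *   "projects/my-project/locations/us-central1/persistentResources/my-resource";
 * let pageToken: string | undefined = undefined;
 * do {
 *   const page = await ai.projectsLocationsPersistentResourcesOperationsList(
 *     parent,
 *     { pageSize: 50, pageToken },
 *   );
 *   for (const op of page.operations ?? []) console.log(op.name, op.done);
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```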
*/ async projectsLocationsPersistentResourcesOperationsList(name: string, opts: ProjectsLocationsPersistentResourcesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsPersistentResourcesOperationsWait(name: string, opts: ProjectsLocationsPersistentResourcesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsPersistentResourcesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a PersistentResource. * * @param name Immutable. Resource name of a PersistentResource. */ async projectsLocationsPersistentResourcesPatch(name: string, req: GoogleCloudAiplatformV1PersistentResource, opts: ProjectsLocationsPersistentResourcesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1PersistentResource(req); opts = serializeProjectsLocationsPersistentResourcesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Reboots a PersistentResource. * * @param name Required. The name of the PersistentResource resource. Format: `projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}` */ async projectsLocationsPersistentResourcesReboot(name: string, req: GoogleCloudAiplatformV1RebootPersistentResourceRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:reboot`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Batch cancel PipelineJobs. Firstly the server will check if all the jobs * are in non-terminal states, and skip the jobs that are already terminated. * If the operation failed, none of the pipeline jobs are cancelled. The * server will poll the states of all the pipeline jobs periodically to check * the cancellation status. This operation will return an LRO. 
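 *
 * A minimal usage sketch (the project, location and job IDs are placeholders;
 * the request body assumes the `names` list field of
 * GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * const op = await ai.projectsLocationsPipelineJobsBatchCancel(
 *   "projects/my-project/locations/us-central1",
 *   {
 *     names: [
 *       "projects/my-project/locations/us-central1/pipelineJobs/job-1",
 *       "projects/my-project/locations/us-central1/pipelineJobs/job-2",
 *     ],
 *   },
 * );
 * console.log(op.name); // LRO tracking the batch cancellation
 * ```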
* * @param parent Required. The name of the PipelineJobs' parent resource. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsBatchCancel(parent: string, req: GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs:batchCancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Batch deletes PipelineJobs The Operation is atomic. If it fails, none of * the PipelineJobs are deleted. If it succeeds, all of the PipelineJobs are * deleted. * * @param parent Required. The name of the PipelineJobs' parent resource. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsBatchDelete(parent: string, req: GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs:batchDelete`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Cancels a PipelineJob. Starts asynchronous cancellation on the * PipelineJob. The server makes a best effort to cancel the pipeline, but * success is not guaranteed. Clients can use PipelineService.GetPipelineJob * or other methods to check whether the cancellation succeeded or whether the * pipeline completed despite cancellation. On successful cancellation, the * PipelineJob is not deleted; instead it becomes a pipeline with a * PipelineJob.error value with a google.rpc.Status.code of 1, corresponding * to `Code.CANCELLED`, and PipelineJob.state is set to `CANCELLED`. * * @param name Required. The name of the PipelineJob to cancel. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}` */ async projectsLocationsPipelineJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelPipelineJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a PipelineJob. A PipelineJob will run immediately when created. * * @param parent Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsCreate(parent: string, req: GoogleCloudAiplatformV1PipelineJob, opts: ProjectsLocationsPipelineJobsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1PipelineJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs`); if (opts.pipelineJobId !== undefined) { url.searchParams.append("pipelineJobId", String(opts.pipelineJobId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1PipelineJob(data); } /** * Deletes a PipelineJob. * * @param name Required. The name of the PipelineJob resource to be deleted. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}` */ async projectsLocationsPipelineJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a PipelineJob. * * @param name Required. 
The name of the PipelineJob resource. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}` */ async projectsLocationsPipelineJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1PipelineJob(data); } /** * Lists PipelineJobs in a Location. * * @param parent Required. The resource name of the Location to list the PipelineJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsList(parent: string, opts: ProjectsLocationsPipelineJobsListOptions = {}): Promise { opts = serializeProjectsLocationsPipelineJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListPipelineJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsPipelineJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsPipelineJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsPipelineJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsPipelineJobsOperationsList(name: string, opts: ProjectsLocationsPipelineJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsPipelineJobsOperationsWait(name: string, opts: ProjectsLocationsPipelineJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsPipelineJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. */ async projectsLocationsPublishersModelsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Fetch an asynchronous online prediction operation. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` */ async projectsLocationsPublishersModelsFetchPredictOperation(endpoint: string, req: GoogleCloudAiplatformV1FetchPredictOperationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:fetchPredictOperation`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform an online prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsPredict(endpoint: string, req: GoogleCloudAiplatformV1PredictRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1PredictResponse; } async projectsLocationsPublishersModelsPredictLongRunning(endpoint: string, req: GoogleCloudAiplatformV1PredictLongRunningRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predictLongRunning`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Perform an online prediction with an arbitrary HTTP payload. The response * includes the following HTTP headers: * `X-Vertex-AI-Endpoint-Id`: ID of the * Endpoint that served this prediction. * `X-Vertex-AI-Deployed-Model-Id`: ID * of the Endpoint's DeployedModel that served this prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsRawPredict(endpoint: string, req: GoogleCloudAiplatformV1RawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1RawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:rawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Perform a server-side streaming online prediction request for Vertex LLM * streaming. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsServerStreamingPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamingPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamingPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:serverStreamingPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1StreamingPredictResponse(data); } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform a streaming online prediction with an arbitrary HTTP payload. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsStreamRawPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamRawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamRawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:streamRawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Creates a RagCorpus. * * @param parent Required. The resource name of the Location to create the RagCorpus in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsRagCorporaCreate(parent: string, req: GoogleCloudAiplatformV1RagCorpus): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/ragCorpora`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a RagCorpus. * * @param name Required. The name of the RagCorpus resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaDelete(name: string, opts: ProjectsLocationsRagCorporaDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a RagCorpus. * * @param name Required. The name of the RagCorpus resource. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1RagCorpus; } /** * Lists RagCorpora in a Location. * * @param parent Required. The resource name of the Location from which to list the RagCorpora. Format: `projects/{project}/locations/{location}` */ async projectsLocationsRagCorporaList(parent: string, opts: ProjectsLocationsRagCorporaListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/ragCorpora`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListRagCorporaResponse; } /** * Updates a RagCorpus. * * @param name Output only. The resource name of the RagCorpus. */ async projectsLocationsRagCorporaPatch(name: string, req: GoogleCloudAiplatformV1RagCorpus): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a RagFile. * * @param name Required. The name of the RagFile resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` */ async projectsLocationsRagCorporaRagFilesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a RagFile. * * @param name Required. The name of the RagFile resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` */ async projectsLocationsRagCorporaRagFilesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1RagFile(data); } /** * Import files from Google Cloud Storage or Google Drive into a RagCorpus. * * @param parent Required. The name of the RagCorpus resource into which to import files. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaRagFilesImport(parent: string, req: GoogleCloudAiplatformV1ImportRagFilesRequest): Promise { req = serializeGoogleCloudAiplatformV1ImportRagFilesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/ragFiles:import`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Lists RagFiles in a RagCorpus. * * @param parent Required. The resource name of the RagCorpus from which to list the RagFiles. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaRagFilesList(parent: string, opts: ProjectsLocationsRagCorporaRagFilesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/ragFiles`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListRagFilesResponse(data); } /** * Creates a reasoning engine. * * @param parent Required. The resource name of the Location to create the ReasoningEngine in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsReasoningEnginesCreate(parent: string, req: GoogleCloudAiplatformV1ReasoningEngine): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/reasoningEngines`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a reasoning engine. * * @param name Required. The name of the ReasoningEngine resource to be deleted. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a reasoning engine. * * @param name Required. The name of the ReasoningEngine resource. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ReasoningEngine; } /** * Lists reasoning engines in a location. * * @param parent Required. The resource name of the Location to list the ReasoningEngines from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsReasoningEnginesList(parent: string, opts: ProjectsLocationsReasoningEnginesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/reasoningEngines`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListReasoningEnginesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. 
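 *
 * A minimal usage sketch (the operation name is a placeholder):
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const ai = new AIplatform();
 * await ai.projectsLocationsReasoningEnginesOperationsCancel(
 *   "projects/my-project/locations/us-central1/reasoningEngines/my-engine/operations/1234567890",
 * );
 * ```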
* * @param name The name of the operation resource to be cancelled. */ async projectsLocationsReasoningEnginesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsReasoningEnginesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsReasoningEnginesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsReasoningEnginesOperationsList(name: string, opts: ProjectsLocationsReasoningEnginesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsReasoningEnginesOperationsWait(name: string, opts: ProjectsLocationsReasoningEnginesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsReasoningEnginesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a reasoning engine. * * @param name Identifier. The resource name of the ReasoningEngine. 
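   *
   * Usage sketch (illustrative, not from the upstream docs): updating only
   * the display name via `updateMask`. The resource name, the `displayName`
   * field, and the field-mask path are placeholders/assumptions, and `auth`
   * is assumed to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const op = await ai.projectsLocationsReasoningEnginesPatch(
   *   "projects/my-project/locations/us-central1/reasoningEngines/12345", // placeholder
   *   { displayName: "renamed-engine" }, // assumed field on the ReasoningEngine resource
   *   { updateMask: "displayName" }, // assumed field-mask path
   * );
   * console.log("patch operation:", op.name);
   * ```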
*/ async projectsLocationsReasoningEnginesPatch(name: string, req: GoogleCloudAiplatformV1ReasoningEngine, opts: ProjectsLocationsReasoningEnginesPatchOptions = {}): Promise { opts = serializeProjectsLocationsReasoningEnginesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Queries using a reasoning engine. * * @param name Required. The name of the ReasoningEngine resource to use. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesQuery(name: string, req: GoogleCloudAiplatformV1QueryReasoningEngineRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:query`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1QueryReasoningEngineResponse; } /** * Retrieves relevant contexts for a query. * * @param parent Required. The resource name of the Location from which to retrieve RagContexts. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`. */ async projectsLocationsRetrieveContexts(parent: string, req: GoogleCloudAiplatformV1RetrieveContextsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }:retrieveContexts`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1RetrieveContextsResponse; } /** * Creates a Schedule. * * @param parent Required. The resource name of the Location to create the Schedule in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsSchedulesCreate(parent: string, req: GoogleCloudAiplatformV1Schedule): Promise { req = serializeGoogleCloudAiplatformV1Schedule(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/schedules`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Schedule(data); } /** * Deletes a Schedule. * * @param name Required. The name of the Schedule resource to be deleted. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a Schedule. * * @param name Required. The name of the Schedule resource. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Schedule(data); } /** * Lists Schedules in a Location. * * @param parent Required. The resource name of the Location to list the Schedules from. 
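   *
   * Usage sketch (illustrative, not from the upstream docs): listing
   * Schedules in a Location one page at a time. A `filter` and `orderBy` can
   * also be passed; their syntax is defined by the service. The parent name
   * is a placeholder, `schedules`/`nextPageToken` are assumed response
   * fields, and `auth` is assumed to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const res = await ai.projectsLocationsSchedulesList(
   *   "projects/my-project/locations/us-central1", // placeholder parent
   *   { pageSize: 25 },
   * );
   * console.log(res.schedules?.length ?? 0, "schedules; next page token:", res.nextPageToken);
   * ```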
Format: `projects/{project}/locations/{location}` */ async projectsLocationsSchedulesList(parent: string, opts: ProjectsLocationsSchedulesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/schedules`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListSchedulesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsSchedulesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsSchedulesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsSchedulesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
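   *
   * Usage sketch (illustrative, not from the upstream docs): listing the
   * operations that belong to one Schedule resource. The schedule name is a
   * placeholder, `operations` is the assumed response field, and `auth` is
   * assumed to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const res = await ai.projectsLocationsSchedulesOperationsList(
   *   "projects/my-project/locations/us-central1/schedules/my-schedule", // placeholder
   *   { pageSize: 10 },
   * );
   * for (const op of res.operations ?? []) {
   *   console.log(op.name, "done:", op.done);
   * }
   * ```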
*/ async projectsLocationsSchedulesOperationsList(name: string, opts: ProjectsLocationsSchedulesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsSchedulesOperationsWait(name: string, opts: ProjectsLocationsSchedulesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsSchedulesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an active or paused Schedule. When the Schedule is updated, new * runs will be scheduled starting from the updated next execution time after * the update time based on the time_specification in the updated Schedule. * All unstarted runs before the update time will be skipped while already * created runs will NOT be paused or canceled. * * @param name Immutable. The resource name of the Schedule. */ async projectsLocationsSchedulesPatch(name: string, req: GoogleCloudAiplatformV1Schedule, opts: ProjectsLocationsSchedulesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Schedule(req); opts = serializeProjectsLocationsSchedulesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1Schedule(data); } /** * Pauses a Schedule. Will mark Schedule.state to 'PAUSED'. If the schedule * is paused, no new runs will be created. Already created runs will NOT be * paused or canceled. * * @param name Required. The name of the Schedule resource to be paused. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesPause(name: string, req: GoogleCloudAiplatformV1PauseScheduleRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:pause`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Resumes a paused Schedule to start scheduling new runs. 
Will mark
   * Schedule.state to 'ACTIVE'. Only a paused Schedule can be resumed. When the
   * Schedule is resumed, new runs will be scheduled starting from the next
   * execution time after the current time based on the time_specification in
   * the Schedule. If Schedule.catch_up is set to true, all missed runs will be
   * scheduled for backfill first.
   *
   * @param name Required. The name of the Schedule resource to be resumed. Format: `projects/{project}/locations/{location}/schedules/{schedule}`
   */
  async projectsLocationsSchedulesResume(name: string, req: GoogleCloudAiplatformV1ResumeScheduleRequest): Promise<GoogleProtobufEmpty> {
    const url = new URL(`${this.#baseUrl}v1/${ name }:resume`);
    const body = JSON.stringify(req);
    const data = await request(url.href, {
      client: this.#client,
      method: "POST",
      body,
    });
    return data as GoogleProtobufEmpty;
  }

  /**
   * Creates a SpecialistPool.
   *
   * @param parent Required. The parent Project name for the new SpecialistPool. The form is `projects/{project}/locations/{location}`.
   */
  async projectsLocationsSpecialistPoolsCreate(parent: string, req: GoogleCloudAiplatformV1SpecialistPool): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ parent }/specialistPools`);
    const body = JSON.stringify(req);
    const data = await request(url.href, {
      client: this.#client,
      method: "POST",
      body,
    });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Deletes a SpecialistPool as well as all Specialists in the pool.
   *
   * @param name Required. The resource name of the SpecialistPool to delete. Format: `projects/{project}/locations/{location}/specialistPools/{specialist_pool}`
   */
  async projectsLocationsSpecialistPoolsDelete(name: string, opts: ProjectsLocationsSpecialistPoolsDeleteOptions = {}): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    if (opts.force !== undefined) {
      url.searchParams.append("force", String(opts.force));
    }
    const data = await request(url.href, {
      client: this.#client,
      method: "DELETE",
    });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Gets a SpecialistPool.
   *
   * @param name Required. The name of the SpecialistPool resource. The form is `projects/{project}/locations/{location}/specialistPools/{specialist_pool}`.
   */
  async projectsLocationsSpecialistPoolsGet(name: string): Promise<GoogleCloudAiplatformV1SpecialistPool> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, {
      client: this.#client,
      method: "GET",
    });
    return data as GoogleCloudAiplatformV1SpecialistPool;
  }

  /**
   * Lists SpecialistPools in a Location.
   *
   * @param parent Required. The name of the SpecialistPool's parent resource. Format: `projects/{project}/locations/{location}`
   */
  async projectsLocationsSpecialistPoolsList(parent: string, opts: ProjectsLocationsSpecialistPoolsListOptions = {}): Promise<GoogleCloudAiplatformV1ListSpecialistPoolsResponse> {
    opts = serializeProjectsLocationsSpecialistPoolsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/${ parent }/specialistPools`);
    if (opts.pageSize !== undefined) {
      url.searchParams.append("pageSize", String(opts.pageSize));
    }
    if (opts.pageToken !== undefined) {
      url.searchParams.append("pageToken", String(opts.pageToken));
    }
    if (opts.readMask !== undefined) {
      url.searchParams.append("readMask", String(opts.readMask));
    }
    const data = await request(url.href, {
      client: this.#client,
      method: "GET",
    });
    return data as GoogleCloudAiplatformV1ListSpecialistPoolsResponse;
  }

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not guaranteed.
   * If the server doesn't support this method, it returns
   * `google.rpc.Code.UNIMPLEMENTED`.
Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsSpecialistPoolsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsSpecialistPoolsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsSpecialistPoolsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsSpecialistPoolsOperationsList(name: string, opts: ProjectsLocationsSpecialistPoolsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
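   *
   * Usage sketch (illustrative, not from the upstream docs): waiting on the
   * long-running operation returned by projectsLocationsSpecialistPoolsCreate.
   * The parent name and request fields are placeholders/assumptions, the
   * `timeout` value assumes the Duration-in-seconds encoding used by the
   * options serializer, and `auth` is assumed to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const op = await ai.projectsLocationsSpecialistPoolsCreate(
   *   "projects/my-project/locations/us-central1", // placeholder parent
   *   { displayName: "image-labelers" }, // assumed minimal request fields
   * );
   * const latest = await ai.projectsLocationsSpecialistPoolsOperationsWait(op.name!, { timeout: 60 });
   * console.log("done:", latest.done);
   * ```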
*/ async projectsLocationsSpecialistPoolsOperationsWait(name: string, opts: ProjectsLocationsSpecialistPoolsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsSpecialistPoolsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a SpecialistPool. * * @param name Required. The resource name of the SpecialistPool. */ async projectsLocationsSpecialistPoolsPatch(name: string, req: GoogleCloudAiplatformV1SpecialistPool, opts: ProjectsLocationsSpecialistPoolsPatchOptions = {}): Promise { opts = serializeProjectsLocationsSpecialistPoolsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Creates a Study. A resource name will be generated after creation of the * Study. * * @param parent Required. The resource name of the Location to create the CustomJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsStudiesCreate(parent: string, req: GoogleCloudAiplatformV1Study): Promise { req = serializeGoogleCloudAiplatformV1Study(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/studies`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Study(data); } /** * Deletes a Study. * * @param name Required. The name of the Study resource to be deleted. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets a Study by name. * * @param name Required. The name of the Study resource. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Study(data); } /** * Lists all the studies in a region for an associated project. * * @param parent Required. The resource name of the Location to list the Study from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsStudiesList(parent: string, opts: ProjectsLocationsStudiesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/studies`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListStudiesResponse(data); } /** * Looks a study up using the user-defined display_name field instead of the * fully qualified resource name. * * @param parent Required. The resource name of the Location to get the Study from. 
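   *
   * Usage sketch (illustrative, not from the upstream docs): looking a Study
   * up by its display name instead of its resource name. The parent and the
   * `displayName` request field are placeholders/assumptions, and `auth` is
   * assumed to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const study = await ai.projectsLocationsStudiesLookup(
   *   "projects/my-project/locations/us-central1", // placeholder parent
   *   { displayName: "my-hparam-study" }, // assumed request field
   * );
   * console.log("resolved study:", study.name);
   * ```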
Format: `projects/{project}/locations/{location}` */ async projectsLocationsStudiesLookup(parent: string, req: GoogleCloudAiplatformV1LookupStudyRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/studies:lookup`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Study(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsStudiesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsStudiesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsStudiesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsStudiesOperationsList(name: string, opts: ProjectsLocationsStudiesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. 
If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsStudiesOperationsWait(name: string, opts: ProjectsLocationsStudiesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsStudiesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Adds a measurement of the objective metrics to a Trial. This measurement * is assumed to have been taken before the Trial is complete. * * @param trialName Required. The name of the trial to add measurement. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsAddTrialMeasurement(trialName: string, req: GoogleCloudAiplatformV1AddTrialMeasurementRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ trialName }:addTrialMeasurement`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Checks whether a Trial should stop or not. Returns a long-running * operation. When the operation is successful, it will contain a * CheckTrialEarlyStoppingStateResponse. * * @param trialName Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsCheckTrialEarlyStoppingState(trialName: string, req: GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ trialName }:checkTrialEarlyStoppingState`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Marks a Trial as complete. * * @param name Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsComplete(name: string, req: GoogleCloudAiplatformV1CompleteTrialRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:complete`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Adds a user provided Trial to a Study. * * @param parent Required. The resource name of the Study to create the Trial in. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesTrialsCreate(parent: string, req: GoogleCloudAiplatformV1Trial): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Deletes a Trial. * * @param name Required. The Trial's name. 
Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets a Trial. * * @param name Required. The name of the Trial resource. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Trial; } /** * Lists the Trials associated with a Study. * * @param parent Required. The resource name of the Study to list the Trial from. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesTrialsList(parent: string, opts: ProjectsLocationsStudiesTrialsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTrialsResponse; } /** * Lists the pareto-optimal Trials for multi-objective Study or the optimal * Trials for single-objective Study. The definition of pareto-optimal can be * checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency * * @param parent Required. The name of the Study that the optimal Trial belongs to. */ async projectsLocationsStudiesTrialsListOptimalTrials(parent: string, req: GoogleCloudAiplatformV1ListOptimalTrialsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials:listOptimalTrials`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1ListOptimalTrialsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsStudiesTrialsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. 
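   *
   * Usage sketch (illustrative, not from the upstream docs): discarding the
   * record of a trial operation once it has finished and its result is no
   * longer needed. The operation name is a placeholder and `auth` is assumed
   * to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const opName = "projects/my-project/locations/us-central1/studies/123/trials/4/operations/567"; // placeholder
   * const op = await ai.projectsLocationsStudiesTrialsOperationsGet(opName);
   * if (op.done) {
   *   await ai.projectsLocationsStudiesTrialsOperationsDelete(opName);
   * }
   * ```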
*/ async projectsLocationsStudiesTrialsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsStudiesTrialsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsStudiesTrialsOperationsList(name: string, opts: ProjectsLocationsStudiesTrialsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsStudiesTrialsOperationsWait(name: string, opts: ProjectsLocationsStudiesTrialsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsStudiesTrialsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Stops a Trial. * * @param name Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsStop(name: string, req: GoogleCloudAiplatformV1StopTrialRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:stop`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Adds one or more Trials to a Study, with parameter values suggested by * Vertex AI Vizier. Returns a long-running operation associated with the * generation of Trial suggestions. When this long-running operation succeeds, * it will contain a SuggestTrialsResponse. * * @param parent Required. 
The project and location that the Study belongs to. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesTrialsSuggest(parent: string, req: GoogleCloudAiplatformV1SuggestTrialsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials:suggest`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Reads multiple TensorboardTimeSeries' data. The data point number limit is * 1000 for scalars, 100 for tensors and blob references. If the number of * data points stored is less than the limit, all data is returned. Otherwise, * the number limit of data points is randomly selected from this time series * and returned. * * @param tensorboard Required. The resource name of the Tensorboard containing TensorboardTimeSeries to read data from. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}`. The TensorboardTimeSeries referenced by time_series must be sub resources of this Tensorboard. */ async projectsLocationsTensorboardsBatchRead(tensorboard: string, opts: ProjectsLocationsTensorboardsBatchReadOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboard }:batchRead`); if (opts.timeSeries !== undefined) { url.searchParams.append("timeSeries", String(opts.timeSeries)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse(data); } /** * Creates a Tensorboard. * * @param parent Required. The resource name of the Location to create the Tensorboard in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTensorboardsCreate(parent: string, req: GoogleCloudAiplatformV1Tensorboard): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/tensorboards`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a Tensorboard. * * @param name Required. The name of the Tensorboard to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Batch create TensorboardTimeSeries that belong to a TensorboardExperiment. * * @param parent Required. The resource name of the TensorboardExperiment to create the TensorboardTimeSeries in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` The TensorboardRuns referenced by the parent fields in the CreateTensorboardTimeSeriesRequest messages must be sub resources of this TensorboardExperiment. */ async projectsLocationsTensorboardsExperimentsBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest): Promise { req = serializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse(data); } /** * Creates a TensorboardExperiment. 
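   *
   * Usage sketch (illustrative, not from the upstream docs): creating an
   * experiment under an existing Tensorboard and supplying the ID to use via
   * `tensorboardExperimentId`. The parent name, the ID, and the `displayName`
   * field are placeholders/assumptions, and `auth` is assumed to satisfy
   * `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const experiment = await ai.projectsLocationsTensorboardsExperimentsCreate(
   *   "projects/my-project/locations/us-central1/tensorboards/1234567890", // placeholder
   *   { displayName: "run-comparison" }, // assumed field on TensorboardExperiment
   *   { tensorboardExperimentId: "exp-001" },
   * );
   * console.log("created:", experiment.name);
   * ```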
* * @param parent Required. The resource name of the Tensorboard to create the TensorboardExperiment in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsExperimentsCreate(parent: string, req: GoogleCloudAiplatformV1TensorboardExperiment, opts: ProjectsLocationsTensorboardsExperimentsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/experiments`); if (opts.tensorboardExperimentId !== undefined) { url.searchParams.append("tensorboardExperimentId", String(opts.tensorboardExperimentId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1TensorboardExperiment; } /** * Deletes a TensorboardExperiment. * * @param name Required. The name of the TensorboardExperiment to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a TensorboardExperiment. * * @param name Required. The name of the TensorboardExperiment resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1TensorboardExperiment; } /** * Lists TensorboardExperiments in a Location. * * @param parent Required. The resource name of the Tensorboard to list TensorboardExperiments. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsExperimentsList(parent: string, opts: ProjectsLocationsTensorboardsExperimentsListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/experiments`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTensorboardExperimentsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. 
* * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsExperimentsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsExperimentsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsExperimentsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsTensorboardsExperimentsOperationsList(name: string, opts: ProjectsLocationsTensorboardsExperimentsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsExperimentsOperationsWait(name: string, opts: ProjectsLocationsTensorboardsExperimentsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a TensorboardExperiment. * * @param name Output only. 
Name of the TensorboardExperiment. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsPatch(name: string, req: GoogleCloudAiplatformV1TensorboardExperiment, opts: ProjectsLocationsTensorboardsExperimentsPatchOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1TensorboardExperiment; } /** * Batch create TensorboardRuns. * * @param parent Required. The resource name of the TensorboardExperiment to create the TensorboardRuns in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` The parent field in the CreateTensorboardRunRequest messages must match this field. */ async projectsLocationsTensorboardsExperimentsRunsBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/runs:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse; } /** * Creates a TensorboardRun. * * @param parent Required. The resource name of the TensorboardExperiment to create the TensorboardRun in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsRunsCreate(parent: string, req: GoogleCloudAiplatformV1TensorboardRun, opts: ProjectsLocationsTensorboardsExperimentsRunsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/runs`); if (opts.tensorboardRunId !== undefined) { url.searchParams.append("tensorboardRunId", String(opts.tensorboardRunId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1TensorboardRun; } /** * Deletes a TensorboardRun. * * @param name Required. The name of the TensorboardRun to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a TensorboardRun. * * @param name Required. The name of the TensorboardRun resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1TensorboardRun; } /** * Lists TensorboardRuns in a Location. * * @param parent Required. The resource name of the TensorboardExperiment to list TensorboardRuns. 
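   *
   * Usage sketch (illustrative, not from the upstream docs): listing the runs
   * of one TensorboardExperiment. The parent name is a placeholder, the
   * response fields shown (`tensorboardRuns`, `displayName`) are assumptions,
   * and `auth` is assumed to satisfy `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const res = await ai.projectsLocationsTensorboardsExperimentsRunsList(
   *   "projects/my-project/locations/us-central1/tensorboards/1234567890/experiments/exp-001", // placeholder
   *   { pageSize: 100 },
   * );
   * for (const run of res.tensorboardRuns ?? []) {
   *   console.log(run.name, run.displayName);
   * }
   * ```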
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsRunsList(parent: string, opts: ProjectsLocationsTensorboardsExperimentsRunsListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/runs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTensorboardRunsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsExperimentsRunsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsExperimentsRunsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsExperimentsRunsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsTensorboardsExperimentsRunsOperationsList(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsExperimentsRunsOperationsWait(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a TensorboardRun. * * @param name Output only. Name of the TensorboardRun. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsPatch(name: string, req: GoogleCloudAiplatformV1TensorboardRun, opts: ProjectsLocationsTensorboardsExperimentsRunsPatchOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1TensorboardRun; } /** * Creates a TensorboardTimeSeries. * * @param parent Required. The resource name of the TensorboardRun to create the TensorboardTimeSeries in. 
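   *
   * Usage sketch (illustrative, not from the upstream docs): creating a
   * scalar time series under a TensorboardRun. The parent name is a
   * placeholder and the `displayName`/`valueType` request fields (with
   * `"SCALAR"` as the value type) are assumptions about the
   * TensorboardTimeSeries shape; `auth` is assumed to satisfy
   * `CredentialsClient`.
   *
   * ```ts
   * const ai = new AIplatform(auth);
   * const ts = await ai.projectsLocationsTensorboardsExperimentsRunsTimeSeriesCreate(
   *   "projects/my-project/locations/us-central1/tensorboards/1234567890/experiments/exp-001/runs/run-01", // placeholder
   *   { displayName: "train/loss", valueType: "SCALAR" }, // assumed fields
   * );
   * console.log("created time series:", ts.name);
   * ```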
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesCreate(parent: string, req: GoogleCloudAiplatformV1TensorboardTimeSeries, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1TensorboardTimeSeries(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/timeSeries`); if (opts.tensorboardTimeSeriesId !== undefined) { url.searchParams.append("tensorboardTimeSeriesId", String(opts.tensorboardTimeSeriesId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data); } /** * Deletes a TensorboardTimeSeries. * * @param name Required. The name of the TensorboardTimeSeries to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Exports a TensorboardTimeSeries' data. Data is returned in paginated * responses. * * @param tensorboardTimeSeries Required. The resource name of the TensorboardTimeSeries to export data from. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesExportTensorboardTimeSeries(tensorboardTimeSeries: string, req: GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboardTimeSeries }:exportTensorboardTimeSeries`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse(data); } /** * Gets a TensorboardTimeSeries. * * @param name Required. The name of the TensorboardTimeSeries resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data); } /** * Lists TensorboardTimeSeries in a Location. * * @param parent Required. The resource name of the TensorboardRun to list TensorboardTimeSeries. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesList(parent: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/timeSeries`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
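 *
 * @example Sketch of listing operations and then re-checking one of them with
 * the adjacent `...TimeSeriesOperationsGet` method. Assumes `ai` is an
 * `AIplatform` instance built with an authenticated client, and that the
 * responses carry the standard `operations`, `name` and `done` fields of the
 * google.longrunning types. Resource names are placeholders.
 *
 * ```ts
 * const parent =
 *   "projects/my-project/locations/us-central1/tensorboards/123/experiments/my-exp/runs/my-run/timeSeries/ts1";
 * const listed = await ai.projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsList(parent, {
 *   pageSize: 10,
 * });
 * const first = listed.operations?.[0];
 * if (first?.name) {
 *   const latest = await ai.projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsGet(first.name);
 *   console.log(latest.done ? "operation finished" : "operation still running");
 * }
 * ```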
*/ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsList(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWait(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a TensorboardTimeSeries. * * @param name Output only. Name of the TensorboardTimeSeries. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesPatch(name: string, req: GoogleCloudAiplatformV1TensorboardTimeSeries, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1TensorboardTimeSeries(req); opts = serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data); } /** * Reads a TensorboardTimeSeries' data. By default, if the number of data * points stored is less than 1000, all data is returned. Otherwise, 1000 data * points is randomly selected from this time series and returned. This value * can be changed by changing max_data_points, which can't be greater than * 10k. * * @param tensorboardTimeSeries Required. The resource name of the TensorboardTimeSeries to read data from. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesRead(tensorboardTimeSeries: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesReadOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboardTimeSeries }:read`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.maxDataPoints !== undefined) { url.searchParams.append("maxDataPoints", String(opts.maxDataPoints)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse(data); } /** * Gets bytes of TensorboardBlobs. This is to allow reading blob data stored * in consumer project's Cloud Storage bucket without users having to obtain * Cloud Storage access permission. * * @param timeSeries Required. The resource name of the TensorboardTimeSeries to list Blobs. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesReadBlobData(timeSeries: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesReadBlobDataOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ timeSeries }:readBlobData`); if (opts.blobIds !== undefined) { url.searchParams.append("blobIds", String(opts.blobIds)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardBlobDataResponse(data); } /** * Write time series data points into multiple TensorboardTimeSeries under a * TensorboardRun. If any data fail to be ingested, an error is returned. * * @param tensorboardRun Required. The resource name of the TensorboardRun to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsWrite(tensorboardRun: string, req: GoogleCloudAiplatformV1WriteTensorboardRunDataRequest): Promise { req = serializeGoogleCloudAiplatformV1WriteTensorboardRunDataRequest(req); const url = new URL(`${this.#baseUrl}v1/${ tensorboardRun }:write`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1WriteTensorboardRunDataResponse; } /** * Write time series data points of multiple TensorboardTimeSeries in * multiple TensorboardRun's. If any data fail to be ingested, an error is * returned. * * @param tensorboardExperiment Required. The resource name of the TensorboardExperiment to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsWrite(tensorboardExperiment: string, req: GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest): Promise { req = serializeGoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest(req); const url = new URL(`${this.#baseUrl}v1/${ tensorboardExperiment }:write`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse; } /** * Gets a Tensorboard. * * @param name Required. The name of the Tensorboard resource. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Tensorboard; } /** * Lists Tensorboards in a Location. * * @param parent Required. The resource name of the Location to list Tensorboards. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTensorboardsList(parent: string, opts: ProjectsLocationsTensorboardsListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/tensorboards`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTensorboardsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. 
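 *
 * @example Sketch of paging through every operation for a hypothetical
 * Tensorboard. Assumes `ai` is an authenticated `AIplatform` instance and
 * that the list response exposes the standard `operations` and
 * `nextPageToken` fields.
 *
 * ```ts
 * const name = "projects/my-project/locations/us-central1/tensorboards/123";
 * let pageToken: string | undefined = undefined;
 * do {
 *   const page = await ai.projectsLocationsTensorboardsOperationsList(name, { pageSize: 100, pageToken });
 *   for (const op of page.operations ?? []) console.log(op.name);
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```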
* * @param name The name of the operation's parent resource. */ async projectsLocationsTensorboardsOperationsList(name: string, opts: ProjectsLocationsTensorboardsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsOperationsWait(name: string, opts: ProjectsLocationsTensorboardsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a Tensorboard. * * @param name Output only. Name of the Tensorboard. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsPatch(name: string, req: GoogleCloudAiplatformV1Tensorboard, opts: ProjectsLocationsTensorboardsPatchOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Returns the storage size for a given TensorBoard instance. * * @param tensorboard Required. The name of the Tensorboard resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsReadSize(tensorboard: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboard }:readSize`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardSizeResponse(data); } /** * Returns a list of monthly active users for a given TensorBoard instance. * * @param tensorboard Required. The name of the Tensorboard resource. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsReadUsage(tensorboard: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboard }:readUsage`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponse(data); } /** * Cancels a TrainingPipeline. Starts asynchronous cancellation on the * TrainingPipeline. The server makes a best effort to cancel the pipeline, * but success is not guaranteed. Clients can use * PipelineService.GetTrainingPipeline or other methods to check whether the * cancellation succeeded or whether the pipeline completed despite * cancellation. On successful cancellation, the TrainingPipeline is not * deleted; instead it becomes a pipeline with a TrainingPipeline.error value * with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * TrainingPipeline.state is set to `CANCELLED`. * * @param name Required. The name of the TrainingPipeline to cancel. Format: `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` */ async projectsLocationsTrainingPipelinesCancel(name: string, req: GoogleCloudAiplatformV1CancelTrainingPipelineRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a TrainingPipeline. A created TrainingPipeline right away will be * attempted to be run. * * @param parent Required. The resource name of the Location to create the TrainingPipeline in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTrainingPipelinesCreate(parent: string, req: GoogleCloudAiplatformV1TrainingPipeline): Promise { req = serializeGoogleCloudAiplatformV1TrainingPipeline(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/trainingPipelines`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1TrainingPipeline(data); } /** * Deletes a TrainingPipeline. * * @param name Required. The name of the TrainingPipeline resource to be deleted. Format: `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` */ async projectsLocationsTrainingPipelinesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a TrainingPipeline. * * @param name Required. The name of the TrainingPipeline resource. Format: `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` */ async projectsLocationsTrainingPipelinesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1TrainingPipeline(data); } /** * Lists TrainingPipelines in a Location. * * @param parent Required. The resource name of the Location to list the TrainingPipelines from. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsTrainingPipelinesList(parent: string, opts: ProjectsLocationsTrainingPipelinesListOptions = {}): Promise { opts = serializeProjectsLocationsTrainingPipelinesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/trainingPipelines`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListTrainingPipelinesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTrainingPipelinesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTrainingPipelinesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTrainingPipelinesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsTrainingPipelinesOperationsList(name: string, opts: ProjectsLocationsTrainingPipelinesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTrainingPipelinesOperationsWait(name: string, opts: ProjectsLocationsTrainingPipelinesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTrainingPipelinesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Cancels a TuningJob. Starts asynchronous cancellation on the TuningJob. * The server makes a best effort to cancel the job, but success is not * guaranteed. Clients can use GenAiTuningService.GetTuningJob or other * methods to check whether the cancellation succeeded or whether the job * completed despite cancellation. On successful cancellation, the TuningJob * is not deleted; instead it becomes a job with a TuningJob.error value with * a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * TuningJob.state is set to `CANCELLED`. * * @param name Required. The name of the TuningJob to cancel. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */ async projectsLocationsTuningJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelTuningJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a TuningJob. A created TuningJob right away will be attempted to * be run. * * @param parent Required. The resource name of the Location to create the TuningJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTuningJobsCreate(parent: string, req: GoogleCloudAiplatformV1TuningJob): Promise { req = serializeGoogleCloudAiplatformV1TuningJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/tuningJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1TuningJob(data); } /** * Gets a TuningJob. 
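 *
 * @example Minimal sketch; the resource name is a placeholder and `ai` is
 * assumed to be an `AIplatform` instance built with an authenticated client.
 *
 * ```ts
 * const job = await ai.projectsLocationsTuningJobsGet(
 *   "projects/my-project/locations/us-central1/tuningJobs/456",
 * );
 * console.log(job);
 * ```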
* * @param name Required. The name of the TuningJob resource. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */ async projectsLocationsTuningJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1TuningJob(data); } /** * Lists TuningJobs in a Location. * * @param parent Required. The resource name of the Location to list the TuningJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTuningJobsList(parent: string, opts: ProjectsLocationsTuningJobsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/tuningJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListTuningJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTuningJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTuningJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTuningJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsTuningJobsOperationsList(name: string, opts: ProjectsLocationsTuningJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Rebase a TunedModel. * * @param parent Required. The resource name of the Location into which to rebase the Model. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTuningJobsRebaseTunedModel(parent: string, req: GoogleCloudAiplatformV1RebaseTunedModelRequest): Promise { req = serializeGoogleCloudAiplatformV1RebaseTunedModelRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/tuningJobs:rebaseTunedModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Updates a cache config. * * @param name Identifier. Name of the cache config. Format: - `projects/{project}/cacheConfig`. */ async projectsUpdateCacheConfig(name: string, req: GoogleCloudAiplatformV1CacheConfig): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. */ async publishersModelsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. 
Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Gets a Model Garden publisher model. * * @param name Required. The name of the PublisherModel resource. Format: `publishers/{publisher}/models/{publisher_model}` */ async publishersModelsGet(name: string, opts: PublishersModelsGetOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.huggingFaceToken !== undefined) { url.searchParams.append("huggingFaceToken", String(opts.huggingFaceToken)); } if (opts.isHuggingFaceModel !== undefined) { url.searchParams.append("isHuggingFaceModel", String(opts.isHuggingFaceModel)); } if (opts.languageCode !== undefined) { url.searchParams.append("languageCode", String(opts.languageCode)); } if (opts.view !== undefined) { url.searchParams.append("view", String(opts.view)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1PublisherModel(data); } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } } /** * Generate video response. */ export interface CloudAiLargeModelsVisionGenerateVideoResponse { /** * The generates samples. */ generatedSamples?: CloudAiLargeModelsVisionMedia[]; /** * Returns if any videos were filtered due to RAI policies. */ raiMediaFilteredCount?: number; /** * Returns rai failure reasons if any. */ raiMediaFilteredReasons?: string[]; } function serializeCloudAiLargeModelsVisionGenerateVideoResponse(data: any): CloudAiLargeModelsVisionGenerateVideoResponse { return { ...data, generatedSamples: data["generatedSamples"] !== undefined ? data["generatedSamples"].map((item: any) => (serializeCloudAiLargeModelsVisionMedia(item))) : undefined, }; } function deserializeCloudAiLargeModelsVisionGenerateVideoResponse(data: any): CloudAiLargeModelsVisionGenerateVideoResponse { return { ...data, generatedSamples: data["generatedSamples"] !== undefined ? data["generatedSamples"].map((item: any) => (deserializeCloudAiLargeModelsVisionMedia(item))) : undefined, }; } /** * Image. */ export interface CloudAiLargeModelsVisionImage { /** * Image encoding, encoded as "image/png" or "image/jpg". */ encoding?: string; /** * Generation seed for the sampled image. 
This parameter is exposed to the * user only if one of the following is true: 1. The user specified * per-example seeds in the request. 2. The user doesn't specify the * generation seed in the request. */ generationSeed?: number; /** * Raw bytes. */ image?: Uint8Array; /** * RAI scores for generated image. */ imageRaiScores?: CloudAiLargeModelsVisionImageRAIScores; /** * Image size. The size of the image. Can be self reported, or computed from * the image bytes. */ imageSize?: CloudAiLargeModelsVisionImageImageSize; /** * RAI info for image. */ raiInfo?: CloudAiLargeModelsVisionRaiInfo; /** * Semantic filter info for image. */ semanticFilterResponse?: CloudAiLargeModelsVisionSemanticFilterResponse; /** * Text/Expanded text input for imagen. */ text?: string; /** * Path to another storage (typically Google Cloud Storage). */ uri?: string; } function serializeCloudAiLargeModelsVisionImage(data: any): CloudAiLargeModelsVisionImage { return { ...data, image: data["image"] !== undefined ? encodeBase64(data["image"]) : undefined, }; } function deserializeCloudAiLargeModelsVisionImage(data: any): CloudAiLargeModelsVisionImage { return { ...data, image: data["image"] !== undefined ? decodeBase64(data["image"] as string) : undefined, }; } /** * Image size. */ export interface CloudAiLargeModelsVisionImageImageSize { channels?: number; height?: number; width?: number; } /** * RAI scores for generated image returned. */ export interface CloudAiLargeModelsVisionImageRAIScores { /** * Agile watermark score for image. */ agileWatermarkDetectionScore?: number; } /** * Media. */ export interface CloudAiLargeModelsVisionMedia { /** * Image. */ image?: CloudAiLargeModelsVisionImage; /** * Video */ video?: CloudAiLargeModelsVisionVideo; } function serializeCloudAiLargeModelsVisionMedia(data: any): CloudAiLargeModelsVisionMedia { return { ...data, image: data["image"] !== undefined ? serializeCloudAiLargeModelsVisionImage(data["image"]) : undefined, video: data["video"] !== undefined ? serializeCloudAiLargeModelsVisionVideo(data["video"]) : undefined, }; } function deserializeCloudAiLargeModelsVisionMedia(data: any): CloudAiLargeModelsVisionMedia { return { ...data, image: data["image"] !== undefined ? deserializeCloudAiLargeModelsVisionImage(data["image"]) : undefined, video: data["video"] !== undefined ? deserializeCloudAiLargeModelsVisionVideo(data["video"]) : undefined, }; } export interface CloudAiLargeModelsVisionNamedBoundingBox { classes?: string[]; entities?: string[]; scores?: number[]; x1?: number; x2?: number; y1?: number; y2?: number; } /** * Next ID: 6 */ export interface CloudAiLargeModelsVisionRaiInfo { /** * List of blocked entities from the blocklist if it is detected. */ blockedEntities?: string[]; /** * The list of detected labels for different rai categories. */ detectedLabels?: CloudAiLargeModelsVisionRaiInfoDetectedLabels[]; /** * The model name used to indexing into the RaiFilterConfig map. Would either * be one of imagegeneration@002-006, imagen-3.0-... api endpoint names, or * internal names used for mapping to different filter configs (genselfie, * ai_watermark) than its api endpoint. */ modelName?: string; /** * List of rai categories' information to return */ raiCategories?: string[]; /** * List of rai scores mapping to the rai categories. Rounded to 1 decimal * place. */ scores?: number[]; } /** * Filters returning list of deteceted labels, scores, and bounding boxes. 
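 *
 * Example (illustrative only): walking the detected labels attached to a
 * generated image's RAI info, using the fields declared on this interface and
 * its entity/bounding-box types.
 *
 * ```ts
 * function logDetectedLabels(labels: CloudAiLargeModelsVisionRaiInfoDetectedLabels[]) {
 *   for (const label of labels) {
 *     for (const entity of label.entities ?? []) {
 *       const box = entity.boundingBox;
 *       console.log(
 *         `${label.raiCategory ?? "unspecified"}: ${entity.description ?? "unknown"} (score ${entity.score ?? "n/a"})`,
 *         box ? `box [${box.x1},${box.y1}]-[${box.x2},${box.y2}]` : "",
 *       );
 *     }
 *   }
 * }
 * ```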
*/ export interface CloudAiLargeModelsVisionRaiInfoDetectedLabels { /** * The list of detected entities for the rai signal. */ entities?: CloudAiLargeModelsVisionRaiInfoDetectedLabelsEntity[]; /** * The RAI category for the deteceted labels. */ raiCategory?: string; } /** * An integer bounding box of original pixels of the image for the detected * labels. */ export interface CloudAiLargeModelsVisionRaiInfoDetectedLabelsBoundingBox { /** * The X coordinate of the top-left corner, in pixels. */ x1?: number; /** * The X coordinate of the bottom-right corner, in pixels. */ x2?: number; /** * The Y coordinate of the top-left corner, in pixels. */ y1?: number; /** * The Y coordinate of the bottom-right corner, in pixels. */ y2?: number; } /** * The properties for a detected entity from the rai signal. */ export interface CloudAiLargeModelsVisionRaiInfoDetectedLabelsEntity { /** * Bounding box of the label */ boundingBox?: CloudAiLargeModelsVisionRaiInfoDetectedLabelsBoundingBox; /** * Description of the label */ description?: string; /** * The intersection ratio between the detection bounding box and the mask. */ iouScore?: number; /** * MID of the label */ mid?: string; /** * Confidence score of the label */ score?: number; } export interface CloudAiLargeModelsVisionSemanticFilterResponse { /** * Class labels of the bounding boxes that failed the semantic filtering. * Bounding box coordinates. */ namedBoundingBoxes?: CloudAiLargeModelsVisionNamedBoundingBox[]; /** * This response is added when semantic filter config is turned on in * EditConfig. It reports if this image is passed semantic filter response. If * passed_semantic_filter is false, the bounding box information will be * populated for user to check what caused the semantic filter to fail. */ passedSemanticFilter?: boolean; } /** * Video */ export interface CloudAiLargeModelsVisionVideo { /** * Base 64 encoded video bytes. */ encodedVideo?: Uint8Array; /** * Video encoding, for example "video/mp4". */ encoding?: string; /** * Path to another storage (typically Google Cloud Storage). */ uri?: string; /** * Raw bytes. */ video?: Uint8Array; } function serializeCloudAiLargeModelsVisionVideo(data: any): CloudAiLargeModelsVisionVideo { return { ...data, encodedVideo: data["encodedVideo"] !== undefined ? encodeBase64(data["encodedVideo"]) : undefined, video: data["video"] !== undefined ? encodeBase64(data["video"]) : undefined, }; } function deserializeCloudAiLargeModelsVisionVideo(data: any): CloudAiLargeModelsVisionVideo { return { ...data, encodedVideo: data["encodedVideo"] !== undefined ? decodeBase64(data["encodedVideo"] as string) : undefined, video: data["video"] !== undefined ? decodeBase64(data["video"] as string) : undefined, }; } /** * Create API error message for Vertex Pipeline. Next Id: 3. */ export interface CloudAiPlatformCommonCreatePipelineJobApiErrorDetail { /** * The error root cause returned by CreatePipelineJob API. 
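 *
 * Example (illustrative only): turning this error detail into a log message.
 * The enum literals and the `publicMessage` field used below are the ones
 * declared on this interface.
 *
 * ```ts
 * function describeCreatePipelineJobError(
 *   detail: CloudAiPlatformCommonCreatePipelineJobApiErrorDetail,
 * ): string {
 *   switch (detail.errorCause) {
 *     case "INVALID_PIPELINE_SPEC_FORMAT":
 *     case "INVALID_PIPELINE_SPEC":
 *       return `Pipeline spec problem: ${detail.publicMessage ?? "no details"}`;
 *     case "INSUFFICIENT_QUOTA":
 *       return "Quota exhausted; retry later or request more quota.";
 *     default:
 *       return detail.publicMessage ?? "Unknown CreatePipelineJob error.";
 *   }
 * }
 * ```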
*/ errorCause?: | "ERROR_CAUSE_UNSPECIFIED" | "INVALID_PIPELINE_SPEC_FORMAT" | "INVALID_PIPELINE_SPEC" | "INVALID_DEPLOYMENT_CONFIG" | "INVALID_DEPLOYMENT_SPEC" | "INVALID_INSTANCE_SCHEMA" | "INVALID_CUSTOM_JOB" | "INVALID_CONTAINER_SPEC" | "INVALID_NOTIFICATION_EMAIL_SETUP" | "INVALID_SERVICE_ACCOUNT_SETUP" | "INVALID_KMS_SETUP" | "INVALID_NETWORK_SETUP" | "INVALID_PIPELINE_TASK_SPEC" | "INVALID_PIPELINE_TASK_ARTIFACT" | "INVALID_IMPORTER_SPEC" | "INVALID_RESOLVER_SPEC" | "INVALID_RUNTIME_PARAMETERS" | "CLOUD_API_NOT_ENABLED" | "INVALID_GCS_INPUT_URI" | "INVALID_GCS_OUTPUT_URI" | "INVALID_COMPONENT_SPEC" | "INVALID_DAG_OUTPUTS_SPEC" | "INVALID_DAG_SPEC" | "INSUFFICIENT_QUOTA" | "INTERNAL"; /** * Public messages contains actionable items for the error cause. */ publicMessage?: string; } /** * Additional options for AIplatform#datasetsCreate. */ export interface DatasetsCreateOptions { /** * Required. The resource name of the Location to create the Dataset in. * Format: `projects/{project}/locations/{location}` */ parent?: string; } /** * Additional options for AIplatform#datasetsDatasetVersionsGet. */ export interface DatasetsDatasetVersionsGetOptions { /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeDatasetsDatasetVersionsGetOptions(data: any): DatasetsDatasetVersionsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsDatasetVersionsGetOptions(data: any): DatasetsDatasetVersionsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsDatasetVersionsList. */ export interface DatasetsDatasetVersionsListOptions { /** * Optional. The standard list filter. */ filter?: string; /** * Optional. A comma-separated list of fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. */ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeDatasetsDatasetVersionsListOptions(data: any): DatasetsDatasetVersionsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsDatasetVersionsListOptions(data: any): DatasetsDatasetVersionsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsDatasetVersionsPatch. */ export interface DatasetsDatasetVersionsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Updatable fields: * * `display_name` */ updateMask?: string /* FieldMask */; } function serializeDatasetsDatasetVersionsPatchOptions(data: any): DatasetsDatasetVersionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeDatasetsDatasetVersionsPatchOptions(data: any): DatasetsDatasetVersionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsGet. */ export interface DatasetsGetOptions { /** * Mask specifying which fields to read. 
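 *
 * Example (illustrative; the field paths are assumptions about the Dataset
 * resource, not taken from this excerpt). FieldMask values are written as
 * comma-separated field paths:
 *
 * ```ts
 * // Passed as the second argument to AIplatform#datasetsGet.
 * const opts: DatasetsGetOptions = { readMask: "displayName,createTime" };
 * ```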
*/ readMask?: string /* FieldMask */; } function serializeDatasetsGetOptions(data: any): DatasetsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsGetOptions(data: any): DatasetsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsList. */ export interface DatasetsListOptions { /** * An expression for filtering the results of the request. For field names * both snake_case and camelCase are supported. * `display_name`: supports = * and != * `metadata_schema_uri`: supports = and != * `labels` supports * general map functions that is: * `labels.key=value` - key:value equality * * `labels.key:* or labels:key - key existence * A key including a space must * be quoted. `labels."a key"`. Some examples: * `displayName="myDisplayName"` * * `labels.myKey="myValue"` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `display_name` * `create_time` * `update_time` */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Required. The name of the Dataset's parent resource. Format: * `projects/{project}/locations/{location}` */ parent?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeDatasetsListOptions(data: any): DatasetsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsListOptions(data: any): DatasetsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsPatch. */ export interface DatasetsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Updatable fields: * * `display_name` * `description` * `labels` */ updateMask?: string /* FieldMask */; } function serializeDatasetsPatchOptions(data: any): DatasetsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeDatasetsPatchOptions(data: any): DatasetsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Message that represents an arbitrary HTTP body. It should only be used for * payload formats that can't be represented as JSON, such as raw binary or an * HTML page. This message can be used both in streaming and non-streaming API * methods in the request as well as the response. It can be used as a top-level * request field, which is convenient if one wants to extract parameters from * either the URL or HTTP template into the request fields and also want access * to the raw HTTP body. Example: message GetResourceRequest { // A unique * request id. string request_id = 1; // The raw HTTP body is bound to this * field. 
google.api.HttpBody http_body = 2; } service ResourceService { rpc * GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc * UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } * Example with streaming methods: service CaldavService { rpc * GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); * rpc UpdateCalendar(stream google.api.HttpBody) returns (stream * google.api.HttpBody); } Use of this type only changes how the request and * response bodies are handled, all other features will continue to work * unchanged. */ export interface GoogleApiHttpBody { /** * The HTTP Content-Type header value specifying the content type of the * body. */ contentType?: string; /** * The HTTP request/response body as raw binary. */ data?: Uint8Array; /** * Application specific response metadata. Must be set in the first response * for streaming APIs. */ extensions?: { [key: string]: any }[]; } function serializeGoogleApiHttpBody(data: any): GoogleApiHttpBody { return { ...data, data: data["data"] !== undefined ? encodeBase64(data["data"]) : undefined, }; } function deserializeGoogleApiHttpBody(data: any): GoogleApiHttpBody { return { ...data, data: data["data"] !== undefined ? decodeBase64(data["data"] as string) : undefined, }; } /** * Parameters that configure the active learning pipeline. Active learning will * label the data incrementally by several iterations. For every iteration, it * will select a batch of data based on the sampling strategy. */ export interface GoogleCloudAiplatformV1ActiveLearningConfig { /** * Max number of human labeled DataItems. */ maxDataItemCount?: bigint; /** * Max percent of total DataItems for human labeling. */ maxDataItemPercentage?: number; /** * Active learning data sampling config. For every active learning labeling * iteration, it will select a batch of data based on the sampling strategy. */ sampleConfig?: GoogleCloudAiplatformV1SampleConfig; /** * CMLE training config. For every active learning labeling iteration, system * will train a machine learning model on CMLE. The trained model will be used * by data sampling algorithm to select DataItems. */ trainingConfig?: GoogleCloudAiplatformV1TrainingConfig; } function serializeGoogleCloudAiplatformV1ActiveLearningConfig(data: any): GoogleCloudAiplatformV1ActiveLearningConfig { return { ...data, maxDataItemCount: data["maxDataItemCount"] !== undefined ? String(data["maxDataItemCount"]) : undefined, trainingConfig: data["trainingConfig"] !== undefined ? serializeGoogleCloudAiplatformV1TrainingConfig(data["trainingConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ActiveLearningConfig(data: any): GoogleCloudAiplatformV1ActiveLearningConfig { return { ...data, maxDataItemCount: data["maxDataItemCount"] !== undefined ? BigInt(data["maxDataItemCount"]) : undefined, trainingConfig: data["trainingConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1TrainingConfig(data["trainingConfig"]) : undefined, }; } /** * Request message for MetadataService.AddContextArtifactsAndExecutions. */ export interface GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest { /** * The resource names of the Artifacts to attribute to the Context. Format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` */ artifacts?: string[]; /** * The resource names of the Executions to associate with the Context. 
* Format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ executions?: string[]; } /** * Response message for MetadataService.AddContextArtifactsAndExecutions. */ export interface GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse { } /** * Request message for MetadataService.AddContextChildren. */ export interface GoogleCloudAiplatformV1AddContextChildrenRequest { /** * The resource names of the child Contexts. */ childContexts?: string[]; } /** * Response message for MetadataService.AddContextChildren. */ export interface GoogleCloudAiplatformV1AddContextChildrenResponse { } /** * Request message for MetadataService.AddExecutionEvents. */ export interface GoogleCloudAiplatformV1AddExecutionEventsRequest { /** * The Events to create and add. */ events?: GoogleCloudAiplatformV1Event[]; } /** * Response message for MetadataService.AddExecutionEvents. */ export interface GoogleCloudAiplatformV1AddExecutionEventsResponse { } /** * Request message for VizierService.AddTrialMeasurement. */ export interface GoogleCloudAiplatformV1AddTrialMeasurementRequest { /** * Required. The measurement to be added to a Trial. */ measurement?: GoogleCloudAiplatformV1Measurement; } /** * Used to assign specific AnnotationSpec to a particular area of a DataItem or * the whole part of the DataItem. */ export interface GoogleCloudAiplatformV1Annotation { /** * Output only. The source of the Annotation. */ readonly annotationSource?: GoogleCloudAiplatformV1UserActionReference; /** * Output only. Timestamp when this Annotation was created. */ readonly createTime?: Date; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * Annotations. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * No more than 64 user labels can be associated with one Annotation(System * labels are excluded). See https://goo.gl/xmQnxf for more information and * examples of labels. System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. Following system labels * exist for each Annotation: * * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's * annotation set this Annotation belongs to. If not set, the Annotation is * not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output * only, its value is the payload_schema's title. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the Annotation. */ readonly name?: string; /** * Required. The schema of the payload can be found in payload_schema. */ payload?: any; /** * Required. Google Cloud Storage URI points to a YAML file describing * payload. The schema is defined as an [OpenAPI 3.0.2 Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * The schema files that can be used here are found in * gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the * chosen schema must be consistent with the parent Dataset's metadata. */ payloadSchemaUri?: string; /** * Output only. Timestamp when this Annotation was last updated. */ readonly updateTime?: Date; } /** * Identifies a concept with which DataItems may be annotated with. 
*/ export interface GoogleCloudAiplatformV1AnnotationSpec { /** * Output only. Timestamp when this AnnotationSpec was created. */ readonly createTime?: Date; /** * Required. The user-defined name of the AnnotationSpec. The name can be up * to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Output only. Resource name of the AnnotationSpec. */ readonly name?: string; /** * Output only. Timestamp when AnnotationSpec was last updated. */ readonly updateTime?: Date; } /** * The generic reusable api auth config. */ export interface GoogleCloudAiplatformV1ApiAuth { /** * The API secret. */ apiKeyConfig?: GoogleCloudAiplatformV1ApiAuthApiKeyConfig; } /** * The API secret. */ export interface GoogleCloudAiplatformV1ApiAuthApiKeyConfig { /** * Required. The SecretManager secret version resource name storing API key. * e.g. projects/{project}/secrets/{secret}/versions/{version} */ apiKeySecretVersion?: string; } /** * Instance of a general artifact. */ export interface GoogleCloudAiplatformV1Artifact { /** * Output only. Timestamp when this Artifact was created. */ readonly createTime?: Date; /** * Description of the Artifact */ description?: string; /** * User provided display name of the Artifact. May be up to 128 Unicode * characters. */ displayName?: string; /** * An eTag used to perform consistent read-modify-write updates. If not set, * a blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Artifacts. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Artifact (System labels are excluded). */ labels?: { [key: string]: string }; /** * Properties of the Artifact. Top level metadata keys' heading and trailing * spaces will be trimmed. The size of this field should not exceed 200KB. */ metadata?: { [key: string]: any }; /** * Output only. The resource name of the Artifact. */ readonly name?: string; /** * The title of the schema describing the metadata. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaTitle?: string; /** * The version of the schema in schema_name to use. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaVersion?: string; /** * The state of this Artifact. This is a property of the Artifact, and does * not imply or capture any ongoing process. This property is managed by * clients (such as Vertex AI Pipelines), and the system does not prescribe or * check the validity of state transitions. */ state?: | "STATE_UNSPECIFIED" | "PENDING" | "LIVE"; /** * Output only. Timestamp when this Artifact was last updated. */ readonly updateTime?: Date; /** * The uniform resource identifier of the artifact file. May be empty if * there is no actual artifact file. */ uri?: string; } /** * Metadata information for NotebookService.AssignNotebookRuntime. 
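* While assignment is in progress, a polling client might see a payload such * as `{ progressMessage: "Provisioning notebook runtime" }` (illustrative * value, not a literal message emitted by the service).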
*/ export interface GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * A human-readable message that shows the intermediate progress details of * NotebookRuntime. */ progressMessage?: string; } /** * Request message for NotebookService.AssignNotebookRuntime. */ export interface GoogleCloudAiplatformV1AssignNotebookRuntimeRequest { /** * Required. Provide runtime specific information (e.g. runtime owner, * notebook id) used for NotebookRuntime assignment. */ notebookRuntime?: GoogleCloudAiplatformV1NotebookRuntime; /** * Optional. User specified ID for the notebook runtime. */ notebookRuntimeId?: string; /** * Required. The resource name of the NotebookRuntimeTemplate based on which * a NotebookRuntime will be assigned (reuse or create a new one). */ notebookRuntimeTemplate?: string; } /** * Attribution that explains a particular prediction output. */ export interface GoogleCloudAiplatformV1Attribution { /** * Output only. Error of feature_attributions caused by approximation used in * the explanation method. Lower value means more precise attributions. * For * Sampled Shapley attribution, increasing path_count might reduce the error. * * For Integrated Gradients attribution, increasing step_count might reduce * the error. * For XRAI attribution, increasing step_count might reduce the * error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for * more information. */ readonly approximationError?: number; /** * Output only. Model predicted output if the input instance is constructed * from the baselines of all the features defined in * ExplanationMetadata.inputs. The field name of the output is determined by * the key in ExplanationMetadata.outputs. If the Model's predicted output has * multiple dimensions (rank > 1), this is the value in the output located by * output_index. If there are multiple baselines, their output values are * averaged. */ readonly baselineOutputValue?: number; /** * Output only. Attributions of each explained feature. Features are * extracted from the prediction instances according to explanation metadata * for inputs. The value is a struct, whose keys are the name of the feature. * The values are how much the feature in the instance contributed to the * predicted result. The format of the value is determined by the feature's * input format: * If the feature is a scalar value, the attribution value is * a floating number. * If the feature is an array of scalar values, the * attribution value is an array. * If the feature is a struct, the * attribution value is a struct. The keys in the attribution value struct are * the same as the keys in the feature struct. The formats of the values in * the attribution struct are determined by the formats of the values in the * feature struct. The ExplanationMetadata.feature_attributions_schema_uri * field, pointed to by the ExplanationSpec field of the * Endpoint.deployed_models object, points to the schema file that describes * the features and their attribution values (if it is populated). */ readonly featureAttributions?: any; /** * Output only. Model predicted output on the corresponding explanation * instance. The field name of the output is determined by the key in * ExplanationMetadata.outputs. If the Model predicted output has multiple * dimensions, this is the value in the output located by output_index. */ readonly instanceOutputValue?: number; /** * Output only. 
The display name of the output identified by output_index. * For example, the predicted class name by a multi-classification Model. This * field is only populated iff the Model predicts display names as a separate * field along with the explained output. The predicted display name must have * the same shape as the explained output, and can be located using * output_index. */ readonly outputDisplayName?: string; /** * Output only. The index that locates the explained prediction output. If * the prediction output is a scalar value, output_index is not populated. If * the prediction output has multiple dimensions, the length of the * output_index list is the same as the number of dimensions of the output. * The i-th element in output_index is the element index of the i-th dimension * of the output vector. Indices start from 0. */ readonly outputIndex?: number[]; /** * Output only. Name of the explain output. Specified as the key in * ExplanationMetadata.outputs. */ readonly outputName?: string; } /** * Request message for AugmentPrompt. */ export interface GoogleCloudAiplatformV1AugmentPromptRequest { /** * Optional. Input content to augment; only text format is supported for now. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. Metadata of the backend deployed model. */ model?: GoogleCloudAiplatformV1AugmentPromptRequestModel; /** * Optional. Retrieves contexts from the Vertex RagStore. */ vertexRagStore?: GoogleCloudAiplatformV1VertexRagStore; } function serializeGoogleCloudAiplatformV1AugmentPromptRequest(data: any): GoogleCloudAiplatformV1AugmentPromptRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1AugmentPromptRequest(data: any): GoogleCloudAiplatformV1AugmentPromptRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * Metadata of the backend deployed model. */ export interface GoogleCloudAiplatformV1AugmentPromptRequestModel { /** * Optional. The model to which the user will send the augmented prompt for * content generation. */ model?: string; /** * Optional. The model version of the backend deployed model. */ modelVersion?: string; } /** * Response message for AugmentPrompt. */ export interface GoogleCloudAiplatformV1AugmentPromptResponse { /** * Augmented prompt; only text format is supported for now. */ augmentedPrompt?: GoogleCloudAiplatformV1Content[]; /** * Retrieved facts from RAG data sources. */ facts?: GoogleCloudAiplatformV1Fact[]; } function serializeGoogleCloudAiplatformV1AugmentPromptResponse(data: any): GoogleCloudAiplatformV1AugmentPromptResponse { return { ...data, augmentedPrompt: data["augmentedPrompt"] !== undefined ? data["augmentedPrompt"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1AugmentPromptResponse(data: any): GoogleCloudAiplatformV1AugmentPromptResponse { return { ...data, augmentedPrompt: data["augmentedPrompt"] !== undefined ? data["augmentedPrompt"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * A description of resources that are to a large degree decided by Vertex AI * and require only a modest additional configuration. Each Model supporting * these resources documents its specific guidelines.
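* An illustrative configuration (hypothetical values, not part of the * generated schema): `{ minReplicaCount: 1, maxReplicaCount: 5 }` lets Vertex * AI scale the DeployedModel between one and five replicas as traffic changes.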
*/ export interface GoogleCloudAiplatformV1AutomaticResources { /** * Immutable. The maximum number of replicas this DeployedModel may be * deployed on when the traffic against it increases. If the requested value * is too large, the deployment will error, but if deployment succeeds then * the ability to scale the model to that many replicas is guaranteed (barring * service outages). If traffic against the DeployedModel increases beyond * what its replicas at maximum may handle, a portion of the traffic will be * dropped. If this value is not provided, no upper bound for scaling under * heavy traffic will be assumed, though Vertex AI may be unable to scale * beyond a certain replica count. */ maxReplicaCount?: number; /** * Immutable. The minimum number of replicas this DeployedModel will always * be deployed on. If traffic against it increases, it may dynamically be * deployed onto more replicas up to max_replica_count, and as traffic * decreases, some of these extra replicas may be freed. If the requested * value is too large, the deployment will error. */ minReplicaCount?: number; } /** * The metric specification that defines the target resource utilization (CPU * utilization, accelerator's duty cycle, and so on) for calculating the desired * replica count. */ export interface GoogleCloudAiplatformV1AutoscalingMetricSpec { /** * Required. The resource metric name. Supported metrics: * For Online * Prediction: * * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ metricName?: string; /** * The target resource utilization in percentage (1% - 100%) for the given * metric; once the real usage deviates from the target by a certain * percentage, the machine replicas change. The default value is 60 * (representing 60%) if not provided. */ target?: number; } /** * The storage details for Avro input content. */ export interface GoogleCloudAiplatformV1AvroSource { /** * Required. Google Cloud Storage location. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * Request message for PipelineService.BatchCancelPipelineJobs. */ export interface GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest { /** * Required. The names of the PipelineJobs to cancel. A maximum of 32 * PipelineJobs can be cancelled in a batch. Format: * `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}` */ names?: string[]; } /** * Details of operations that perform batch create Features. */ export interface GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata { /** * Operation metadata for Feature. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.BatchCreateFeatures. Request message * for FeatureRegistryService.BatchCreateFeatures. */ export interface GoogleCloudAiplatformV1BatchCreateFeaturesRequest { /** * Required. The request message specifying the Features to create. All * Features must be created under the same parent EntityType / FeatureGroup. * The `parent` field in each child request message can be omitted. If * `parent` is set in a child request, then the value must match the `parent` * value in this request message. */ requests?: GoogleCloudAiplatformV1CreateFeatureRequest[]; } /** * Response message for FeaturestoreService.BatchCreateFeatures. */ export interface GoogleCloudAiplatformV1BatchCreateFeaturesResponse { /** * The Features created.
*/ features?: GoogleCloudAiplatformV1Feature[]; } /** * Request message for TensorboardService.BatchCreateTensorboardRuns. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest { /** * Required. The request message specifying the TensorboardRuns to create. A * maximum of 1000 TensorboardRuns can be created in a batch. */ requests?: GoogleCloudAiplatformV1CreateTensorboardRunRequest[]; } /** * Response message for TensorboardService.BatchCreateTensorboardRuns. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse { /** * The created TensorboardRuns. */ tensorboardRuns?: GoogleCloudAiplatformV1TensorboardRun[]; } /** * Request message for TensorboardService.BatchCreateTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { /** * Required. The request message specifying the TensorboardTimeSeries to * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch. */ requests?: GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest[]; } function serializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { return { ...data, requests: data["requests"] !== undefined ? data["requests"].map((item: any) => (serializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { return { ...data, requests: data["requests"] !== undefined ? data["requests"].map((item: any) => (deserializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(item))) : undefined, }; } /** * Response message for TensorboardService.BatchCreateTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { /** * The created TensorboardTimeSeries. */ tensorboardTimeSeries?: GoogleCloudAiplatformV1TensorboardTimeSeries[]; } function serializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (serializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } /** * A description of resources that are used for performing batch operations, * are dedicated to a Model, and need manual configuration. */ export interface GoogleCloudAiplatformV1BatchDedicatedResources { /** * Required. Immutable. The specification of a single machine. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Immutable. The maximum number of machine replicas the batch operation may * be scaled to. The default value is 10. */ maxReplicaCount?: number; /** * Immutable. The number of machine replicas used at the start of the batch * operation. 
If not set, Vertex AI decides the starting number, not greater than * max_replica_count. */ startingReplicaCount?: number; } /** * Request message for PipelineService.BatchDeletePipelineJobs. */ export interface GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest { /** * Required. The names of the PipelineJobs to delete. A maximum of 32 * PipelineJobs can be deleted in a batch. Format: * `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}` */ names?: string[]; } /** * Request message for ModelService.BatchImportEvaluatedAnnotations */ export interface GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest { /** * Required. Evaluated annotations resource to be imported. */ evaluatedAnnotations?: GoogleCloudAiplatformV1EvaluatedAnnotation[]; } /** * Response message for ModelService.BatchImportEvaluatedAnnotations */ export interface GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse { /** * Output only. Number of EvaluatedAnnotations imported. */ readonly importedEvaluatedAnnotationsCount?: number; } /** * Request message for ModelService.BatchImportModelEvaluationSlices */ export interface GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest { /** * Required. Model evaluation slice resource to be imported. */ modelEvaluationSlices?: GoogleCloudAiplatformV1ModelEvaluationSlice[]; } /** * Response message for ModelService.BatchImportModelEvaluationSlices */ export interface GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse { /** * Output only. List of imported ModelEvaluationSlice.name. */ readonly importedModelEvaluationSlices?: string[]; } /** * Runtime operation information for MigrationService.BatchMigrateResources. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Partial results that reflect the latest migration operation progress. */ partialResults?: GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult[]; } /** * Represents a partial result in a batch migration operation for one * MigrateResourceRequest. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult { /** * Migrated dataset resource name. */ dataset?: string; /** * The error result of the migration request in case of failure. */ error?: GoogleRpcStatus; /** * Migrated model resource name. */ model?: string; /** * It's the same as the value in * BatchMigrateResourcesRequest.migrate_resource_requests. */ request?: GoogleCloudAiplatformV1MigrateResourceRequest; } /** * Request message for MigrationService.BatchMigrateResources. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesRequest { /** * Required. The request messages specifying the resources to migrate. They * must be in the same location as the destination. Up to 50 resources can be * migrated in one batch. */ migrateResourceRequests?: GoogleCloudAiplatformV1MigrateResourceRequest[]; } /** * Response message for MigrationService.BatchMigrateResources. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesResponse { /** * Successfully migrated resources. */ migrateResourceResponses?: GoogleCloudAiplatformV1MigrateResourceResponse[]; } /** * A job that uses a Model to produce predictions on multiple input instances. * If predictions for a significant portion of the instances fail, the job may * finish without attempting predictions for all remaining instances.
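* A minimal sketch of a request body (hypothetical names and Cloud Storage * paths; the GcsSource/GcsDestination field names follow their definitions * elsewhere in this file, and the exact fields required depend on the Model): * `{ displayName: "nightly-scoring", model: * "projects/my-project/locations/us-central1/models/my-model", inputConfig: { * instancesFormat: "jsonl", gcsSource: { uris: ["gs://my-bucket/instances.jsonl"] } }, * outputConfig: { predictionsFormat: "jsonl", gcsDestination: { * outputUriPrefix: "gs://my-bucket/output/" } } }`.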
*/ export interface GoogleCloudAiplatformV1BatchPredictionJob { /** * Output only. Statistics on completed and failed prediction instances. */ readonly completionStats?: GoogleCloudAiplatformV1CompletionStats; /** * Output only. Time when the BatchPredictionJob was created. */ readonly createTime?: Date; /** * The config of resources used by the Model during the batch prediction. If * the Model supports DEDICATED_RESOURCES this config may be provided (and the * job will use these resources), if the Model doesn't support * AUTOMATIC_RESOURCES, this config must be provided. */ dedicatedResources?: GoogleCloudAiplatformV1BatchDedicatedResources; /** * For custom-trained Models and AutoML Tabular Models, the container of the * DeployedModel instances will send `stderr` and `stdout` streams to Cloud * Logging by default. Please note that the logs incur cost, which are subject * to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User * can disable container logging by setting this flag to true. */ disableContainerLogging?: boolean; /** * Required. The user-defined name of this BatchPredictionJob. */ displayName?: string; /** * Customer-managed encryption key options for a BatchPredictionJob. If this * is set, then all resources created by the BatchPredictionJob will be * encrypted with the provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the BatchPredictionJob entered any of the following * states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when the job's state is JOB_STATE_FAILED or * JOB_STATE_CANCELLED. */ readonly error?: GoogleRpcStatus; /** * Explanation configuration for this BatchPredictionJob. Can be specified * only if generate_explanation is set to `true`. This value overrides the * value of Model.explanation_spec. All fields of explanation_spec are * optional in the request. If a field of the explanation_spec object is not * populated, the corresponding field of the Model.explanation_spec object is * inherited. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * Generate explanation with the batch prediction results. When set to * `true`, the batch prediction output changes based on the * `predictions_format` field of the BatchPredictionJob.output_config object: * * `bigquery`: output includes a column named `explanation`. The value is a * struct that conforms to the Explanation object. * `jsonl`: The JSON objects * on each line include an additional entry keyed `explanation`. The value of * the entry is a JSON object that conforms to the Explanation object. * * `csv`: Generating explanations for CSV format is not supported. If this * field is set to true, either the Model.explanation_spec or explanation_spec * must be populated. */ generateExplanation?: boolean; /** * Required. Input configuration of the instances on which predictions are * performed. The schema of any single instance may be specified via the * Model's PredictSchemata's instance_schema_uri. */ inputConfig?: GoogleCloudAiplatformV1BatchPredictionJobInputConfig; /** * Configuration for how to convert batch prediction input instances to the * prediction instances that are sent to the Model. */ instanceConfig?: GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig; /** * The labels with user-defined metadata to organize BatchPredictionJobs. 
* Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. */ labels?: { [key: string]: string }; /** * Immutable. Parameters configuring the batch behavior. Currently only * applicable when dedicated_resources are used (in other cases Vertex AI does * the tuning itself). */ manualBatchTuningParameters?: GoogleCloudAiplatformV1ManualBatchTuningParameters; /** * The name of the Model resource that produces the predictions via this job, * must share the same ancestor Location. Starting this job has no impact on * any existing deployments of the Model and their resources. Exactly one of * model and unmanaged_container_model must be set. The model resource name * may contain version id or version alias to specify the version. Example: * `projects/{project}/locations/{location}/models/{model}@2` or * `projects/{project}/locations/{location}/models/{model}@golden` if no * version is specified, the default version will be deployed. The model * resource could also be a publisher model. Example: * `publishers/{publisher}/models/{model}` or * `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` */ model?: string; /** * The parameters that govern the predictions. The schema of the parameters * may be specified via the Model's PredictSchemata's parameters_schema_uri. */ modelParameters?: any; /** * Output only. The version ID of the Model that produces the predictions via * this job. */ readonly modelVersionId?: string; /** * Output only. Resource name of the BatchPredictionJob. */ readonly name?: string; /** * Required. The Configuration specifying where output predictions should be * written. The schema of any single prediction may be specified as a * concatenation of Model's PredictSchemata's instance_schema_uri and * prediction_schema_uri. */ outputConfig?: GoogleCloudAiplatformV1BatchPredictionJobOutputConfig; /** * Output only. Information further describing the output of this job. */ readonly outputInfo?: GoogleCloudAiplatformV1BatchPredictionJobOutputInfo; /** * Output only. Partial failures encountered. For example, single files that * can't be read. This field never exceeds 20 entries. Status details fields * contain standard Google Cloud error details. */ readonly partialFailures?: GoogleRpcStatus[]; /** * Output only. Information about resources that had been consumed by this * job. Provided in real time at best effort basis, as well as a final value * once the job completes. Note: This field currently may be not populated for * batch predictions that use AutoML Models. */ readonly resourcesConsumed?: GoogleCloudAiplatformV1ResourcesConsumed; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * The service account that the DeployedModel's container runs as. If not * specified, a system generated one will be used, which has minimal * permissions and the custom container, if used, may not have enough * permission to access other Google Cloud resources. Users deploying the * Model must have the `iam.serviceAccounts.actAs` permission on this service * account. */ serviceAccount?: string; /** * Output only. Time when the BatchPredictionJob for the first time entered * the `JOB_STATE_RUNNING` state. 
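* On the wire this is an RFC 3339 timestamp string; * deserializeGoogleCloudAiplatformV1BatchPredictionJob converts it to a `Date`.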
*/ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Contains model information necessary to perform batch prediction without * requiring uploading to model registry. Exactly one of model and * unmanaged_container_model must be set. */ unmanagedContainerModel?: GoogleCloudAiplatformV1UnmanagedContainerModel; /** * Output only. Time when the BatchPredictionJob was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1BatchPredictionJob(data: any): GoogleCloudAiplatformV1BatchPredictionJob { return { ...data, unmanagedContainerModel: data["unmanagedContainerModel"] !== undefined ? serializeGoogleCloudAiplatformV1UnmanagedContainerModel(data["unmanagedContainerModel"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchPredictionJob(data: any): GoogleCloudAiplatformV1BatchPredictionJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, unmanagedContainerModel: data["unmanagedContainerModel"] !== undefined ? deserializeGoogleCloudAiplatformV1UnmanagedContainerModel(data["unmanagedContainerModel"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Configures the input to BatchPredictionJob. See * Model.supported_input_storage_formats for Model's supported input formats, * and how instances should be expressed via any of them. */ export interface GoogleCloudAiplatformV1BatchPredictionJobInputConfig { /** * The BigQuery location of the input table. The schema of the table should * be in the format described by the given context OpenAPI Schema, if one is * provided. The table may contain additional columns that are not described * by the schema, and they will be ignored. */ bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * The Cloud Storage location for the input instances. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Required. The format in which instances are given, must be one of the * Model's supported_input_storage_formats. */ instancesFormat?: string; } /** * Configuration defining how to transform batch prediction input instances to * the instances that the Model accepts. */ export interface GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig { /** * Fields that will be excluded in the prediction instance that is sent to * the Model. Excluded will be attached to the batch prediction output if * key_field is not specified. When excluded_fields is populated, * included_fields must be empty. The input must be JSONL with objects at each * line, BigQuery or TfRecord. */ excludedFields?: string[]; /** * Fields that will be included in the prediction instance that is sent to * the Model. If instance_type is `array`, the order of field names in * included_fields also determines the order of the values in the array. When * included_fields is populated, excluded_fields must be empty. The input must * be JSONL with objects at each line, BigQuery or TfRecord. 
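* For example (hypothetical field names), `{ instanceType: "array", * includedFields: ["age", "income"] }` sends each instance to the Model as a * two-element array in that order.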
*/ includedFields?: string[]; /** * The format of the instance that the Model accepts. Vertex AI will convert * compatible batch prediction input instance formats to the specified format. * Supported values are: * `object`: Each input is converted to JSON object * format. * For `bigquery`, each row is converted to an object. * For * `jsonl`, each line of the JSONL input must be an object. * Does not apply * to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each * input is converted to JSON array format. * For `bigquery`, each row is * converted to an array. The order of columns is determined by the BigQuery * column order, unless included_fields is populated. included_fields must be * populated for specifying field orders. * For `jsonl`, if each line of the * JSONL input is an object, included_fields must be populated for specifying * field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or * `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction * input as follows: * For `bigquery` and `csv`, the behavior is the same as * `array`. The order of columns is the same as defined in the file or table, * unless included_fields is populated. * For `jsonl`, the prediction instance * format is determined by each line of the input. * For * `tf-record`/`tf-record-gzip`, each record will be converted to an object in * the format of `{"b64": }`, where `` is the Base64-encoded string of the * content of the record. * For `file-list`, each file in the list will be * converted to an object in the format of `{"b64": }`, where `` is the * Base64-encoded string of the content of the file. */ instanceType?: string; /** * The name of the field that is considered as a key. The values identified * by the key field are not included in the transformed instances that are sent * to the Model. This is similar to specifying the name of this field in * excluded_fields. In addition, the batch prediction output will not include * the instances. Instead, the output will only include the value of the key * field, in a field named `key` in the output: * For `jsonl` output format, * the output will have a `key` field instead of the `instance` field. * For * `csv`/`bigquery` output format, the output will have a `key` column * instead of the instance feature columns. The input must be JSONL with * objects at each line, CSV, BigQuery or TfRecord. */ keyField?: string; } /** * Configures the output of BatchPredictionJob. See * Model.supported_output_storage_formats for supported output formats, and how * predictions are expressed via any of them. */ export interface GoogleCloudAiplatformV1BatchPredictionJobOutputConfig { /** * The BigQuery project or dataset location where the output is to be written * to. If project is provided, a new dataset is created with name * `prediction__` where is made BigQuery-dataset-name compatible (for example, * most special characters become underscores), and timestamp is in * YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two * tables will be created, `predictions`, and `errors`. If the Model has both * instance and prediction schemata defined then the tables have columns as * follows: The `predictions` table contains instances for which the * prediction succeeded, it has columns as per a concatenation of the Model's * instance and prediction schemata.
The `errors` table contains rows for * which the prediction has failed, it has instance columns, as per the * instance schema, followed by a single "errors" column, which as values has * google.rpc.Status represented as a STRUCT, and containing only `code` and * `message`. */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * The Cloud Storage location of the directory where the output is to be * written to. In the given directory a new directory is created. Its name is * `prediction--`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 * format. Inside of it files `predictions_0001.`, `predictions_0002.`, ..., * `predictions_N.` are created where `` depends on chosen predictions_format, * and N may equal 0001 and depends on the total number of successfully * predicted instances. If the Model has both instance and prediction schemata * defined then each such file contains predictions as per the * predictions_format. If prediction for any instance failed (partially or * completely), then an additional `errors_0001.`, `errors_0002.`,..., * `errors_N.` files are created (N depends on total number of failed * predictions). These files contain the failed instances, as per their * schema, followed by an additional `error` field which as value has * google.rpc.Status containing only `code` and `message` fields. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; /** * Required. The format in which Vertex AI gives the predictions, must be one * of the Model's supported_output_storage_formats. */ predictionsFormat?: string; } /** * Further describes this job's output. Supplements output_config. */ export interface GoogleCloudAiplatformV1BatchPredictionJobOutputInfo { /** * Output only. The path of the BigQuery dataset created, in * `bq://projectId.bqDatasetId` format, into which the prediction output is * written. */ readonly bigqueryOutputDataset?: string; /** * Output only. The name of the BigQuery table created, in `predictions_` * format, into which the prediction output is written. Can be used by UI to * generate the BigQuery output path, for example. */ readonly bigqueryOutputTable?: string; /** * Output only. The full path of the Cloud Storage directory created, into * which the prediction output is written. */ readonly gcsOutputDirectory?: string; } /** * Details of operations that batch reads Feature values. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata { /** * Operation metadata for Featurestore batch read Features values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.BatchReadFeatureValues. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { /** * Similar to csv_read_instances, but from BigQuery source. */ bigqueryReadInstances?: GoogleCloudAiplatformV1BigQuerySource; /** * Each read instance consists of exactly one read timestamp and one or more * entity IDs identifying entities of the corresponding EntityTypes whose * Features are requested. Each output instance contains Feature values of * requested entities concatenated together as of the read time. An example * read instance may be `foo_entity_id, bar_entity_id, * 2020-01-01T10:00:00.123Z`. An example output instance may be * `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, * foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each * read instance must be millisecond-aligned. 
`csv_read_instances` are read * instances stored in a plain-text CSV file. The header should be: * [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in * any order. Values in the timestamp column must use the RFC 3339 format, * e.g. `2012-07-30T10:43:17.123Z`. */ csvReadInstances?: GoogleCloudAiplatformV1CsvSource; /** * Required. Specifies output location and format. */ destination?: GoogleCloudAiplatformV1FeatureValueDestination; /** * Required. Specifies EntityType grouping Features to read values of and * settings. */ entityTypeSpecs?: GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec[]; /** * When not empty, the specified fields in the *_read_instances source will * be joined as-is in the output, in addition to those fields from the * Featurestore Entity. For BigQuery source, the type of the pass-through * values will be automatically inferred. For CSV source, the pass-through * values will be passed as opaque bytes. */ passThroughFields?: GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField[]; /** * Optional. Excludes Feature values with feature generation timestamp before * this timestamp. If not set, retrieve oldest values kept in Feature Store. * Timestamp, if present, must not have higher than millisecond precision. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1BatchReadFeatureValuesRequest(data: any): GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { return { ...data, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchReadFeatureValuesRequest(data: any): GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { return { ...data, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Selects Features of an EntityType to read values of and specifies read * settings. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec { /** * Required. ID of the EntityType to select Features. The EntityType id is * the entity_type_id specified during EntityType creation. */ entityTypeId?: string; /** * Required. Selectors choosing which Feature values to read from the * EntityType. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; /** * Per-Feature settings for the batch read. */ settings?: GoogleCloudAiplatformV1DestinationFeatureSetting[]; } /** * Describe pass-through fields in read_instance source. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField { /** * Required. The name of the field in the CSV header or the name of the * column in BigQuery table. The naming restriction is the same as * Feature.name. */ fieldName?: string; } /** * Response message for FeaturestoreService.BatchReadFeatureValues. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesResponse { } /** * Response message for TensorboardService.BatchReadTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { /** * The returned time series data. */ timeSeriesData?: GoogleCloudAiplatformV1TimeSeriesData[]; } function serializeGoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? 
data["timeSeriesData"].map((item: any) => (serializeGoogleCloudAiplatformV1TimeSeriesData(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? data["timeSeriesData"].map((item: any) => (deserializeGoogleCloudAiplatformV1TimeSeriesData(item))) : undefined, }; } /** * The BigQuery location for the output content. */ export interface GoogleCloudAiplatformV1BigQueryDestination { /** * Required. BigQuery URI to a project or table, up to 2000 characters long. * When only the project is specified, the Dataset and Table is created. When * the full table reference is specified, the Dataset must exist and table * must not exist. Accepted forms: * BigQuery path. For example: * `bq://projectId` or `bq://projectId.bqDatasetId` or * `bq://projectId.bqDatasetId.bqTableId`. */ outputUri?: string; } /** * The BigQuery location for the input content. */ export interface GoogleCloudAiplatformV1BigQuerySource { /** * Required. BigQuery URI to a table, up to 2000 characters long. Accepted * forms: * BigQuery path. For example: * `bq://projectId.bqDatasetId.bqTableId`. */ inputUri?: string; } /** * Input for bleu metric. */ export interface GoogleCloudAiplatformV1BleuInput { /** * Required. Repeated bleu instances. */ instances?: GoogleCloudAiplatformV1BleuInstance[]; /** * Required. Spec for bleu score metric. */ metricSpec?: GoogleCloudAiplatformV1BleuSpec; } /** * Spec for bleu instance. */ export interface GoogleCloudAiplatformV1BleuInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Bleu metric value for an instance. */ export interface GoogleCloudAiplatformV1BleuMetricValue { /** * Output only. Bleu score. */ readonly score?: number; } /** * Results for bleu metric. */ export interface GoogleCloudAiplatformV1BleuResults { /** * Output only. Bleu metric values. */ readonly bleuMetricValues?: GoogleCloudAiplatformV1BleuMetricValue[]; } /** * Spec for bleu score metric - calculates the precision of n-grams in the * prediction as compared to reference - returns a score ranging between 0 to 1. */ export interface GoogleCloudAiplatformV1BleuSpec { /** * Optional. Whether to use_effective_order to compute bleu score. */ useEffectiveOrder?: boolean; } /** * Content blob. It's preferred to send as text directly rather than raw bytes. */ export interface GoogleCloudAiplatformV1Blob { /** * Required. Raw bytes. */ data?: Uint8Array; /** * Required. The IANA standard MIME type of the source data. */ mimeType?: string; } function serializeGoogleCloudAiplatformV1Blob(data: any): GoogleCloudAiplatformV1Blob { return { ...data, data: data["data"] !== undefined ? encodeBase64(data["data"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Blob(data: any): GoogleCloudAiplatformV1Blob { return { ...data, data: data["data"] !== undefined ? decodeBase64(data["data"] as string) : undefined, }; } /** * Config for blur baseline. When enabled, a linear path from the maximally * blurred image to the input image is created. 
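* For example, `{ maxBlurSigma: 3 }` (hypothetical value) sets the blur * kernel's standard deviation to 3.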
Using a blurred baseline instead * of zero (black image) is motivated by the BlurIG approach explained here: * https://arxiv.org/abs/2004.03383 */ export interface GoogleCloudAiplatformV1BlurBaselineConfig { /** * The standard deviation of the blur kernel for the blurred baseline. The * same blurring parameter is used for both the height and the width * dimension. If not set, the method defaults to the zero (i.e. black for * images) baseline. */ maxBlurSigma?: number; } /** * A list of boolean values. */ export interface GoogleCloudAiplatformV1BoolArray { /** * A list of bool values. */ values?: boolean[]; } /** * Config of GenAI caching features. This is a singleton resource. */ export interface GoogleCloudAiplatformV1CacheConfig { /** * If set to true, disables GenAI caching. Otherwise caching is enabled. */ disableCache?: boolean; /** * Identifier. Name of the cache config. Format: - * `projects/{project}/cacheConfig`. */ name?: string; } /** * A resource used in LLM queries for users to explicitly specify what to cache * and how to cache. */ export interface GoogleCloudAiplatformV1CachedContent { /** * Optional. Input only. Immutable. The content to cache. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Output only. Creation time of the cache entry. */ readonly createTime?: Date; /** * Optional. Immutable. The user-generated meaningful display name of the * cached content. */ displayName?: string; /** * Timestamp of when this resource is considered expired. This is *always* * provided on output, regardless of what was sent on input. */ expireTime?: Date; /** * Immutable. The name of the publisher model to use for cached content. * Format: * projects/{project}/locations/{location}/publishers/{publisher}/models/{model} */ model?: string; /** * Immutable. Identifier. The server-generated resource name of the cached * content. Format: * projects/{project}/locations/{location}/cachedContents/{cached_content} */ name?: string; /** * Optional. Input only. Immutable. Developer-set system instruction. * Currently, text only. */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Optional. Input only. Immutable. Tool config. This config is shared for * all tools. */ toolConfig?: GoogleCloudAiplatformV1ToolConfig; /** * Optional. Input only. Immutable. A list of `Tools` the model may use to * generate the next response. */ tools?: GoogleCloudAiplatformV1Tool[]; /** * Input only. The TTL for this resource. The expiration time is computed: * now + TTL. */ ttl?: number /* Duration */; /** * Output only. When the cache entry was last updated in UTC time. */ readonly updateTime?: Date; /** * Output only. Metadata on the usage of the cached content. */ readonly usageMetadata?: GoogleCloudAiplatformV1CachedContentUsageMetadata; } function serializeGoogleCloudAiplatformV1CachedContent(data: any): GoogleCloudAiplatformV1CachedContent { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, expireTime: data["expireTime"] !== undefined ? data["expireTime"].toISOString() : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, ttl: data["ttl"] !== undefined ?
data["ttl"] : undefined, }; } function deserializeGoogleCloudAiplatformV1CachedContent(data: any): GoogleCloudAiplatformV1CachedContent { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, expireTime: data["expireTime"] !== undefined ? new Date(data["expireTime"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, ttl: data["ttl"] !== undefined ? data["ttl"] : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Metadata on the usage of the cached content. */ export interface GoogleCloudAiplatformV1CachedContentUsageMetadata { /** * Duration of audio in seconds. */ audioDurationSeconds?: number; /** * Number of images. */ imageCount?: number; /** * Number of text characters. */ textCount?: number; /** * Total number of tokens that the cached content consumes. */ totalTokenCount?: number; /** * Duration of video in seconds. */ videoDurationSeconds?: number; } /** * Request message for JobService.CancelBatchPredictionJob. */ export interface GoogleCloudAiplatformV1CancelBatchPredictionJobRequest { } /** * Request message for JobService.CancelCustomJob. */ export interface GoogleCloudAiplatformV1CancelCustomJobRequest { } /** * Request message for JobService.CancelDataLabelingJob. */ export interface GoogleCloudAiplatformV1CancelDataLabelingJobRequest { } /** * Request message for JobService.CancelHyperparameterTuningJob. */ export interface GoogleCloudAiplatformV1CancelHyperparameterTuningJobRequest { } /** * Request message for JobService.CancelNasJob. */ export interface GoogleCloudAiplatformV1CancelNasJobRequest { } /** * Request message for PipelineService.CancelPipelineJob. */ export interface GoogleCloudAiplatformV1CancelPipelineJobRequest { } /** * Request message for PipelineService.CancelTrainingPipeline. */ export interface GoogleCloudAiplatformV1CancelTrainingPipelineRequest { } /** * Request message for GenAiTuningService.CancelTuningJob. */ export interface GoogleCloudAiplatformV1CancelTuningJobRequest { } /** * A response candidate generated from the model. */ export interface GoogleCloudAiplatformV1Candidate { /** * Output only. Average log probability score of the candidate. */ readonly avgLogprobs?: number; /** * Output only. Source attribution of the generated content. */ readonly citationMetadata?: GoogleCloudAiplatformV1CitationMetadata; /** * Output only. Content parts of the candidate. */ readonly content?: GoogleCloudAiplatformV1Content; /** * Output only. Describes the reason the model stopped generating tokens in * more detail. This is only filled when `finish_reason` is set. */ readonly finishMessage?: string; /** * Output only. The reason why the model stopped generating tokens. If empty, * the model has not stopped generating tokens. */ readonly finishReason?: | "FINISH_REASON_UNSPECIFIED" | "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER" | "BLOCKLIST" | "PROHIBITED_CONTENT" | "SPII" | "MALFORMED_FUNCTION_CALL"; /** * Output only. Metadata specifies sources used to ground generated content.
*/ readonly groundingMetadata?: GoogleCloudAiplatformV1GroundingMetadata; /** * Output only. Index of the candidate. */ readonly index?: number; /** * Output only. Log-likelihood scores for the response tokens and top tokens */ readonly logprobsResult?: GoogleCloudAiplatformV1LogprobsResult; /** * Output only. List of ratings for the safety of a response candidate. There * is at most one rating per category. */ readonly safetyRatings?: GoogleCloudAiplatformV1SafetyRating[]; } /** * This message will be placed in the metadata field of a * google.longrunning.Operation associated with a CheckTrialEarlyStoppingState * request. */ export interface GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateMetatdata { /** * Operation metadata for suggesting Trials. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * The name of the Study that the Trial belongs to. */ study?: string; /** * The Trial name. */ trial?: string; } /** * Request message for VizierService.CheckTrialEarlyStoppingState. */ export interface GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateRequest { } /** * Response message for VizierService.CheckTrialEarlyStoppingState. */ export interface GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateResponse { /** * True if the Trial should stop. */ shouldStop?: boolean; } /** * Source attributions for content. */ export interface GoogleCloudAiplatformV1Citation { /** * Output only. End index into the content. */ readonly endIndex?: number; /** * Output only. License of the attribution. */ readonly license?: string; /** * Output only. Publication date of the attribution. */ readonly publicationDate?: GoogleTypeDate; /** * Output only. Start index into the content. */ readonly startIndex?: number; /** * Output only. Title of the attribution. */ readonly title?: string; /** * Output only. Url reference of the attribution. */ readonly uri?: string; } /** * A collection of source attributions for a piece of content. */ export interface GoogleCloudAiplatformV1CitationMetadata { /** * Output only. List of citations. */ readonly citations?: GoogleCloudAiplatformV1Citation[]; } /** * Claim that is extracted from the input text and facts that support it. */ export interface GoogleCloudAiplatformV1Claim { /** * Index in the input text where the claim ends (exclusive). */ endIndex?: number; /** * Indexes of the facts supporting this claim. */ factIndexes?: number[]; /** * Confidence score of this corroboration. */ score?: number; /** * Index in the input text where the claim starts (inclusive). */ startIndex?: number; } /** * Configurations (e.g. inference timeout) that are applied on your endpoints. */ export interface GoogleCloudAiplatformV1ClientConnectionConfig { /** * Customizable online prediction request timeout. */ inferenceTimeout?: number /* Duration */; } function serializeGoogleCloudAiplatformV1ClientConnectionConfig(data: any): GoogleCloudAiplatformV1ClientConnectionConfig { return { ...data, inferenceTimeout: data["inferenceTimeout"] !== undefined ? data["inferenceTimeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1ClientConnectionConfig(data: any): GoogleCloudAiplatformV1ClientConnectionConfig { return { ...data, inferenceTimeout: data["inferenceTimeout"] !== undefined ? data["inferenceTimeout"] : undefined, }; } /** * Input for coherence metric. */ export interface GoogleCloudAiplatformV1CoherenceInput { /** * Required. Coherence instance. */ instance?: GoogleCloudAiplatformV1CoherenceInstance; /** * Required. 
Spec for coherence score metric. */ metricSpec?: GoogleCloudAiplatformV1CoherenceSpec; } /** * Spec for coherence instance. */ export interface GoogleCloudAiplatformV1CoherenceInstance { /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for coherence result. */ export interface GoogleCloudAiplatformV1CoherenceResult { /** * Output only. Confidence for coherence score. */ readonly confidence?: number; /** * Output only. Explanation for coherence score. */ readonly explanation?: string; /** * Output only. Coherence score. */ readonly score?: number; } /** * Spec for coherence score metric. */ export interface GoogleCloudAiplatformV1CoherenceSpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for Comet metric. */ export interface GoogleCloudAiplatformV1CometInput { /** * Required. Comet instance. */ instance?: GoogleCloudAiplatformV1CometInstance; /** * Required. Spec for comet metric. */ metricSpec?: GoogleCloudAiplatformV1CometSpec; } /** * Spec for Comet instance - The fields used for evaluation are dependent on * the comet version. */ export interface GoogleCloudAiplatformV1CometInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; /** * Optional. Source text in original language. */ source?: string; } /** * Spec for Comet result - calculates the comet score for the given instance * using the version specified in the spec. */ export interface GoogleCloudAiplatformV1CometResult { /** * Output only. Comet score. Range depends on version. */ readonly score?: number; } /** * Spec for Comet metric. */ export interface GoogleCloudAiplatformV1CometSpec { /** * Optional. Source language in BCP-47 format. */ sourceLanguage?: string; /** * Optional. Target language in BCP-47 format. Covers both prediction and * reference. */ targetLanguage?: string; /** * Required. Which version to use for evaluation. */ version?: | "COMET_VERSION_UNSPECIFIED" | "COMET_22_SRC_REF"; } /** * Request message for VizierService.CompleteTrial. */ export interface GoogleCloudAiplatformV1CompleteTrialRequest { /** * Optional. If provided, it will be used as the completed Trial's * final_measurement; Otherwise, the service will auto-select a previously * reported measurement as the final-measurement */ finalMeasurement?: GoogleCloudAiplatformV1Measurement; /** * Optional. A human readable reason why the trial was infeasible. This * should only be provided if `trial_infeasible` is true. */ infeasibleReason?: string; /** * Optional. True if the Trial cannot be run with the given Parameter, and * final_measurement will be ignored. */ trialInfeasible?: boolean; } /** * Success and error statistics of processing multiple entities (for example, * DataItems or structured data rows) in batch. */ export interface GoogleCloudAiplatformV1CompletionStats { /** * Output only. The number of entities for which any error was encountered. */ readonly failedCount?: bigint; /** * Output only. In cases when enough errors are encountered a job, pipeline, * or operation may be failed as a whole. Below is the number of entities for * which the processing had not been finished (either in successful or failed * state). Set to -1 if the number is unknown (for example, the operation * failed before the total entity number could be collected). */ readonly incompleteCount?: bigint; /** * Output only. 
The number of entities that had been processed successfully. */ readonly successfulCount?: bigint; /** * Output only. The number of the successful forecast points that are * generated by the forecasting model. This is ONLY used by the forecasting * batch prediction. */ readonly successfulForecastPointCount?: bigint; } /** * Request message for ComputeTokens RPC call. */ export interface GoogleCloudAiplatformV1ComputeTokensRequest { /** * Optional. Input content. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. The instances that are the input to token computing API call. * Schema is identical to the prediction schema of the text model, even for * the non-text models, like chat models, or Codey models. */ instances?: any[]; /** * Optional. The name of the publisher model requested to serve the * prediction. Format: * projects/{project}/locations/{location}/publishers/*\/models/* */ model?: string; } function serializeGoogleCloudAiplatformV1ComputeTokensRequest(data: any): GoogleCloudAiplatformV1ComputeTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ComputeTokensRequest(data: any): GoogleCloudAiplatformV1ComputeTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * Response message for ComputeTokens RPC call. */ export interface GoogleCloudAiplatformV1ComputeTokensResponse { /** * Lists of tokens info from the input. A ComputeTokensRequest could have * multiple instances with a prompt in each instance. We also need to return * lists of tokens info for the request with multiple instances. */ tokensInfo?: GoogleCloudAiplatformV1TokensInfo[]; } function serializeGoogleCloudAiplatformV1ComputeTokensResponse(data: any): GoogleCloudAiplatformV1ComputeTokensResponse { return { ...data, tokensInfo: data["tokensInfo"] !== undefined ? data["tokensInfo"].map((item: any) => (serializeGoogleCloudAiplatformV1TokensInfo(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data: any): GoogleCloudAiplatformV1ComputeTokensResponse { return { ...data, tokensInfo: data["tokensInfo"] !== undefined ? data["tokensInfo"].map((item: any) => (deserializeGoogleCloudAiplatformV1TokensInfo(item))) : undefined, }; } /** * The Container Registry location for the container image. */ export interface GoogleCloudAiplatformV1ContainerRegistryDestination { /** * Required. Container Registry URI of a container image. Only Google * Container Registry and Artifact Registry are supported now. Accepted forms: * * Google Container Registry path. For example: * `gcr.io/projectId/imageName:tag`. * Artifact Registry path. For example: * `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. If a tag is * not specified, "latest" will be used as the default tag. */ outputUri?: string; } /** * The spec of a Container. */ export interface GoogleCloudAiplatformV1ContainerSpec { /** * The arguments to be passed when starting the container. */ args?: string[]; /** * The command to be invoked when the container is started. It overrides the * entrypoint instruction in Dockerfile when provided. */ command?: string[]; /** * Environment variables to be passed to the container. Maximum limit is 100. */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Required. 
The URI of a container image in the Container Registry that is * to be run on each worker replica. */ imageUri?: string; } /** * The base structured datatype containing multi-part content of a message. A * `Content` includes a `role` field designating the producer of the `Content` * and a `parts` field containing multi-part data that contains the content of * the message turn. */ export interface GoogleCloudAiplatformV1Content { /** * Required. Ordered `Parts` that constitute a single message. Parts may have * different IANA MIME types. */ parts?: GoogleCloudAiplatformV1Part[]; /** * Optional. The producer of the content. Must be either 'user' or 'model'. * Useful to set for multi-turn conversations, otherwise can be left blank or * unset. */ role?: string; } function serializeGoogleCloudAiplatformV1Content(data: any): GoogleCloudAiplatformV1Content { return { ...data, parts: data["parts"] !== undefined ? data["parts"].map((item: any) => (serializeGoogleCloudAiplatformV1Part(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Content(data: any): GoogleCloudAiplatformV1Content { return { ...data, parts: data["parts"] !== undefined ? data["parts"].map((item: any) => (deserializeGoogleCloudAiplatformV1Part(item))) : undefined, }; } /** * Instance of a general context. */ export interface GoogleCloudAiplatformV1Context { /** * Output only. Timestamp when this Context was created. */ readonly createTime?: Date; /** * Description of the Context */ description?: string; /** * User provided display name of the Context. May be up to 128 Unicode * characters. */ displayName?: string; /** * An eTag used to perform consistent read-modify-write updates. If not set, * a blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Contexts. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Context (System labels are excluded). */ labels?: { [key: string]: string }; /** * Properties of the Context. Top level metadata keys' heading and trailing * spaces will be trimmed. The size of this field should not exceed 200KB. */ metadata?: { [key: string]: any }; /** * Immutable. The resource name of the Context. */ name?: string; /** * Output only. A list of resource names of Contexts that are parents of this * Context. A Context may have at most 10 parent_contexts. */ readonly parentContexts?: string[]; /** * The title of the schema describing the metadata. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaTitle?: string; /** * The version of the schema in schema_name to use. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaVersion?: string; /** * Output only. Timestamp when this Context was last updated. */ readonly updateTime?: Date; } /** * Details of ModelService.CopyModel operation. */ export interface GoogleCloudAiplatformV1CopyModelOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for ModelService.CopyModel. 
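 *
 * A minimal construction sketch. The project, location, and IDs below are
 * illustrative placeholders, not values defined elsewhere in this module:
 *
 *     const copyReq: GoogleCloudAiplatformV1CopyModelRequest = {
 *       sourceModel: "projects/my-project/locations/us-central1/models/1234567890",
 *       modelId: "my-copied-model",
 *     };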
*/ export interface GoogleCloudAiplatformV1CopyModelRequest { /** * Customer-managed encryption key options. If this is set, then the Model * copy will be encrypted with the provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Optional. Copy source_model into a new Model with this ID. The ID will * become the final component of the model resource name. This value may be up * to 63 characters, and valid characters are `[a-z0-9_-]`. The first * character cannot be a number or hyphen. */ modelId?: string; /** * Optional. Specify this field to copy source_model into this existing Model * as a new version. Format: * `projects/{project}/locations/{location}/models/{model}` */ parentModel?: string; /** * Required. The resource name of the Model to copy. That Model must be in * the same Project. Format: * `projects/{project}/locations/{location}/models/{model}` */ sourceModel?: string; } /** * Response message of ModelService.CopyModel operation. */ export interface GoogleCloudAiplatformV1CopyModelResponse { /** * The name of the copied Model resource. Format: * `projects/{project}/locations/{location}/models/{model}` */ model?: string; /** * Output only. The version ID of the model that is copied. */ readonly modelVersionId?: string; } /** * RagCorpus status. */ export interface GoogleCloudAiplatformV1CorpusStatus { /** * Output only. Only when the `state` field is ERROR. */ readonly errorStatus?: string; /** * Output only. RagCorpus life state. */ readonly state?: | "UNKNOWN" | "INITIALIZED" | "ACTIVE" | "ERROR"; } /** * Request message for CorroborateContent. */ export interface GoogleCloudAiplatformV1CorroborateContentRequest { /** * Optional. Input content to corroborate; only text format is supported for * now. */ content?: GoogleCloudAiplatformV1Content; /** * Optional. Facts used to generate the text can also be used to corroborate * the text. */ facts?: GoogleCloudAiplatformV1Fact[]; /** * Optional. Parameters that can be set to override default settings per * request. */ parameters?: GoogleCloudAiplatformV1CorroborateContentRequestParameters; } function serializeGoogleCloudAiplatformV1CorroborateContentRequest(data: any): GoogleCloudAiplatformV1CorroborateContentRequest { return { ...data, content: data["content"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["content"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CorroborateContentRequest(data: any): GoogleCloudAiplatformV1CorroborateContentRequest { return { ...data, content: data["content"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["content"]) : undefined, }; } /** * Parameters that can be overridden per request. */ export interface GoogleCloudAiplatformV1CorroborateContentRequestParameters { /** * Optional. Only return claims with citation score larger than the * threshold. */ citationThreshold?: number; } /** * Response message for CorroborateContent. */ export interface GoogleCloudAiplatformV1CorroborateContentResponse { /** * Claims that are extracted from the input content and facts that support * the claims. */ claims?: GoogleCloudAiplatformV1Claim[]; /** * Confidence score of corroborating content. Value is in [0,1], with 1 * indicating the highest confidence. */ corroborationScore?: number; } /** * Request message for PredictionService.CountTokens. */ export interface GoogleCloudAiplatformV1CountTokensRequest { /** * Optional. Input content. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. 
Generation config that the model will use to generate the * response. */ generationConfig?: GoogleCloudAiplatformV1GenerationConfig; /** * Optional. The instances that are the input to token counting call. Schema * is identical to the prediction schema of the underlying model. */ instances?: any[]; /** * Optional. The name of the publisher model requested to serve the * prediction. Format: * `projects/{project}/locations/{location}/publishers/*\/models/*` */ model?: string; /** * Optional. The user provided system instructions for the model. Note: only * text should be used in parts and content in each part will be in a separate * paragraph. */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Optional. A list of `Tools` the model may use to generate the next * response. A `Tool` is a piece of code that enables the system to interact * with external systems to perform an action, or set of actions, outside of * knowledge and scope of the model. */ tools?: GoogleCloudAiplatformV1Tool[]; } function serializeGoogleCloudAiplatformV1CountTokensRequest(data: any): GoogleCloudAiplatformV1CountTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? serializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1CountTokensRequest(data: any): GoogleCloudAiplatformV1CountTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } /** * Response message for PredictionService.CountTokens. */ export interface GoogleCloudAiplatformV1CountTokensResponse { /** * The total number of billable characters counted across all instances from * the request. */ totalBillableCharacters?: number; /** * The total number of tokens counted across all instances from the request. */ totalTokens?: number; } /** * Runtime operation information for DatasetService.CreateDataset. */ export interface GoogleCloudAiplatformV1CreateDatasetOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for DatasetService.CreateDatasetVersion. */ export interface GoogleCloudAiplatformV1CreateDatasetVersionOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for CreateDeploymentResourcePool method. */ export interface GoogleCloudAiplatformV1CreateDeploymentResourcePoolOperationMetadata { /** * The operation generic information. 
*/ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for CreateDeploymentResourcePool method. */ export interface GoogleCloudAiplatformV1CreateDeploymentResourcePoolRequest { /** * Required. The DeploymentResourcePool to create. */ deploymentResourcePool?: GoogleCloudAiplatformV1DeploymentResourcePool; /** * Required. The ID to use for the DeploymentResourcePool, which will become * the final component of the DeploymentResourcePool's resource name. The * maximum length is 63 characters, and valid characters are * `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. */ deploymentResourcePoolId?: string; } /** * Runtime operation information for EndpointService.CreateEndpoint. */ export interface GoogleCloudAiplatformV1CreateEndpointOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create EntityType. */ export interface GoogleCloudAiplatformV1CreateEntityTypeOperationMetadata { /** * Operation metadata for EntityType. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create FeatureGroup. */ export interface GoogleCloudAiplatformV1CreateFeatureGroupOperationMetadata { /** * Operation metadata for FeatureGroup. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create FeatureOnlineStore. */ export interface GoogleCloudAiplatformV1CreateFeatureOnlineStoreOperationMetadata { /** * Operation metadata for FeatureOnlineStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create Feature. */ export interface GoogleCloudAiplatformV1CreateFeatureOperationMetadata { /** * Operation metadata for Feature. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.CreateFeature. Request message for * FeatureRegistryService.CreateFeature. */ export interface GoogleCloudAiplatformV1CreateFeatureRequest { /** * Required. The Feature to create. */ feature?: GoogleCloudAiplatformV1Feature; /** * Required. The ID to use for the Feature, which will become the final * component of the Feature's resource name. This value may be up to 128 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within an * EntityType/FeatureGroup. */ featureId?: string; /** * Required. The resource name of the EntityType or FeatureGroup to create a * Feature. Format for entity_type as parent: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` * Format for feature_group as parent: * `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ parent?: string; } /** * Details of operations that perform create Featurestore. */ export interface GoogleCloudAiplatformV1CreateFeaturestoreOperationMetadata { /** * Operation metadata for Featurestore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create FeatureView. */ export interface GoogleCloudAiplatformV1CreateFeatureViewOperationMetadata { /** * Operation metadata for FeatureView Create. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for IndexEndpointService.CreateIndexEndpoint. 
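 *
 * A small usage sketch, assuming `op` is a GoogleLongrunningOperation returned
 * by an IndexEndpoint create call (the cast below is illustrative; the
 * metadata payload is not strongly typed on the operation itself):
 *
 *     const metadata = op.metadata as
 *       | GoogleCloudAiplatformV1CreateIndexEndpointOperationMetadata
 *       | undefined;
 *     console.log(metadata?.genericMetadata);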
*/ export interface GoogleCloudAiplatformV1CreateIndexEndpointOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for IndexService.CreateIndex. */ export interface GoogleCloudAiplatformV1CreateIndexOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * The operation metadata with regard to Matching Engine Index operation. */ nearestNeighborSearchOperationMetadata?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; } function serializeGoogleCloudAiplatformV1CreateIndexOperationMetadata(data: any): GoogleCloudAiplatformV1CreateIndexOperationMetadata { return { ...data, nearestNeighborSearchOperationMetadata: data["nearestNeighborSearchOperationMetadata"] !== undefined ? serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data["nearestNeighborSearchOperationMetadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreateIndexOperationMetadata(data: any): GoogleCloudAiplatformV1CreateIndexOperationMetadata { return { ...data, nearestNeighborSearchOperationMetadata: data["nearestNeighborSearchOperationMetadata"] !== undefined ? deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data["nearestNeighborSearchOperationMetadata"]) : undefined, }; } /** * Details of operations that perform MetadataService.CreateMetadataStore. */ export interface GoogleCloudAiplatformV1CreateMetadataStoreOperationMetadata { /** * Operation metadata for creating a MetadataStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Metadata information for NotebookService.CreateNotebookExecutionJob. */ export interface GoogleCloudAiplatformV1CreateNotebookExecutionJobOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * A human-readable message that shows the intermediate progress details of * NotebookRuntime. */ progressMessage?: string; } /** * Request message for [NotebookService.CreateNotebookExecutionJob] */ export interface GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest { /** * Required. The NotebookExecutionJob to create. */ notebookExecutionJob?: GoogleCloudAiplatformV1NotebookExecutionJob; /** * Optional. User specified ID for the NotebookExecutionJob. */ notebookExecutionJobId?: string; /** * Required. The resource name of the Location to create the * NotebookExecutionJob. Format: `projects/{project}/locations/{location}` */ parent?: string; } function serializeGoogleCloudAiplatformV1CreateNotebookExecutionJobRequest(data: any): GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest { return { ...data, notebookExecutionJob: data["notebookExecutionJob"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookExecutionJob(data["notebookExecutionJob"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreateNotebookExecutionJobRequest(data: any): GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest { return { ...data, notebookExecutionJob: data["notebookExecutionJob"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookExecutionJob(data["notebookExecutionJob"]) : undefined, }; } /** * Metadata information for NotebookService.CreateNotebookRuntimeTemplate. */ export interface GoogleCloudAiplatformV1CreateNotebookRuntimeTemplateOperationMetadata { /** * The operation generic information. 
*/ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create PersistentResource. */ export interface GoogleCloudAiplatformV1CreatePersistentResourceOperationMetadata { /** * Operation metadata for PersistentResource. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Progress Message for Create LRO */ progressMessage?: string; } /** * Request message for PipelineService.CreatePipelineJob. */ export interface GoogleCloudAiplatformV1CreatePipelineJobRequest { /** * Required. The resource name of the Location to create the PipelineJob in. * Format: `projects/{project}/locations/{location}` */ parent?: string; /** * Required. The PipelineJob to create. */ pipelineJob?: GoogleCloudAiplatformV1PipelineJob; /** * The ID to use for the PipelineJob, which will become the final component * of the PipelineJob name. If not provided, an ID will be automatically * generated. This value should be less than 128 characters, and valid * characters are `/a-z-/`. */ pipelineJobId?: string; } function serializeGoogleCloudAiplatformV1CreatePipelineJobRequest(data: any): GoogleCloudAiplatformV1CreatePipelineJobRequest { return { ...data, pipelineJob: data["pipelineJob"] !== undefined ? serializeGoogleCloudAiplatformV1PipelineJob(data["pipelineJob"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreatePipelineJobRequest(data: any): GoogleCloudAiplatformV1CreatePipelineJobRequest { return { ...data, pipelineJob: data["pipelineJob"] !== undefined ? deserializeGoogleCloudAiplatformV1PipelineJob(data["pipelineJob"]) : undefined, }; } /** * Details of operations that perform create FeatureGroup. */ export interface GoogleCloudAiplatformV1CreateRegistryFeatureOperationMetadata { /** * Operation metadata for Feature. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for * SpecialistPoolService.CreateSpecialistPool. */ export interface GoogleCloudAiplatformV1CreateSpecialistPoolOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create Tensorboard. */ export interface GoogleCloudAiplatformV1CreateTensorboardOperationMetadata { /** * Operation metadata for Tensorboard. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for TensorboardService.CreateTensorboardRun. */ export interface GoogleCloudAiplatformV1CreateTensorboardRunRequest { /** * Required. The resource name of the TensorboardExperiment to create the * TensorboardRun in. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ parent?: string; /** * Required. The TensorboardRun to create. */ tensorboardRun?: GoogleCloudAiplatformV1TensorboardRun; /** * Required. The ID to use for the Tensorboard run, which becomes the final * component of the Tensorboard run's resource name. This value should be * 1-128 characters, and valid characters are `/a-z-/`. */ tensorboardRunId?: string; } /** * Request message for TensorboardService.CreateTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { /** * Required. The resource name of the TensorboardRun to create the * TensorboardTimeSeries in. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ parent?: string; /** * Required. 
The TensorboardTimeSeries to create. */ tensorboardTimeSeries?: GoogleCloudAiplatformV1TensorboardTimeSeries; /** * Optional. The user specified unique ID to use for the * TensorboardTimeSeries, which becomes the final component of the * TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, * 127}" */ tensorboardTimeSeriesId?: string; } function serializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? serializeGoogleCloudAiplatformV1TensorboardTimeSeries(data["tensorboardTimeSeries"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data["tensorboardTimeSeries"]) : undefined, }; } /** * The storage details for CSV output content. */ export interface GoogleCloudAiplatformV1CsvDestination { /** * Required. Google Cloud Storage location. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; } /** * The storage details for CSV input content. */ export interface GoogleCloudAiplatformV1CsvSource { /** * Required. Google Cloud Storage location. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * Represents a job that runs custom workloads such as a Docker container or a * Python package. A CustomJob can have multiple worker pools and each worker * pool can have its own machine and input spec. A CustomJob will be cleaned up * once the job enters terminal state (failed or succeeded). */ export interface GoogleCloudAiplatformV1CustomJob { /** * Output only. Time when the CustomJob was created. */ readonly createTime?: Date; /** * Required. The display name of the CustomJob. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key options for a CustomJob. If this is set, * then all resources created by the CustomJob will be encrypted with the * provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the CustomJob entered any of the following states: * `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is `JOB_STATE_FAILED` or * `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * Required. Job spec. */ jobSpec?: GoogleCloudAiplatformV1CustomJobSpec; /** * The labels with user-defined metadata to organize CustomJobs. Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Resource name of a CustomJob. */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the CustomJob for the first time entered the * `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. 
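 *
 * A minimal sketch of checking for a terminal state, assuming `job` is a
 * GoogleCloudAiplatformV1CustomJob fetched elsewhere:
 *
 *     const done = job.state === "JOB_STATE_SUCCEEDED" ||
 *       job.state === "JOB_STATE_FAILED" ||
 *       job.state === "JOB_STATE_CANCELLED";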
*/ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Output only. Time when the CustomJob was most recently updated. */ readonly updateTime?: Date; /** * Output only. URIs for accessing [interactive * shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) * (one URI for each training node). Only available if * job_spec.enable_web_access is `true`. The keys are names of each node in * the training job; for example, `workerpool0-0` for the primary node, * `workerpool1-0` for the first node in the second worker pool, and * `workerpool1-1` for the second node in the second worker pool. The values * are the URIs for each node's interactive shell. */ readonly webAccessUris?: { [key: string]: string }; } function serializeGoogleCloudAiplatformV1CustomJob(data: any): GoogleCloudAiplatformV1CustomJob { return { ...data, jobSpec: data["jobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["jobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CustomJob(data: any): GoogleCloudAiplatformV1CustomJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, jobSpec: data["jobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["jobSpec"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Represents the spec of a CustomJob. */ export interface GoogleCloudAiplatformV1CustomJobSpec { /** * The Cloud Storage location to store the output of this CustomJob or * HyperparameterTuningJob. For HyperparameterTuningJob, the * baseOutputDirectory of each child CustomJob backing a Trial is set to a * subdirectory of name id under its parent HyperparameterTuningJob's * baseOutputDirectory. The following Vertex AI environment variables will be * passed to containers or python modules when this field is set: For * CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = * `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing * a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` */ baseOutputDirectory?: GoogleCloudAiplatformV1GcsDestination; /** * Optional. Whether you want Vertex AI to enable access to the customized * dashboard in training chief container. If set to `true`, you can access the * dashboard at the URIs given by CustomJob.web_access_uris or * Trial.web_access_uris (within HyperparameterTuningJob.trials). */ enableDashboardAccess?: boolean; /** * Optional. Whether you want Vertex AI to enable [interactive shell * access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) * to training containers. If set to `true`, you can access interactive shells * at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris * (within HyperparameterTuningJob.trials). */ enableWebAccess?: boolean; /** * Optional. The Experiment associated with this job. 
Format: * `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` */ experiment?: string; /** * Optional. The Experiment Run associated with this job. Format: * `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` */ experimentRun?: string; /** * Optional. The name of the Model resources for which to generate a mapping * to artifact URIs. Applicable only to some of the Google-provided custom * jobs. Format: `projects/{project}/locations/{location}/models/{model}` In * order to retrieve a specific version of the model, also provide the version * ID or version alias. Example: * `projects/{project}/locations/{location}/models/{model}@2` or * `projects/{project}/locations/{location}/models/{model}@golden` If no * version ID or alias is specified, the "default" version will be returned. * The "default" version alias is created for the first version of the model, * and can be moved to other versions later on. There will be exactly one * default version. */ models?: string[]; /** * Optional. The full name of the Compute Engine * [network](/compute/docs/networks-and-firewalls#networks) to which the Job * should be peered. For example, `projects/12345/global/networks/myVPC`. * [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in `12345`, and {network} is a network name. To specify * this field, you must have already [configured VPC Network Peering for * Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If * this field is left unspecified, the job is not peered with any network. */ network?: string; /** * Optional. The ID of the PersistentResource in the same Project and * Location in which to run the job. If this is specified, the job will be run on existing * machines held by the PersistentResource instead of on-demand short-lived * machines. The network and CMEK configs on the job should be consistent with * those on the PersistentResource; otherwise, the job will be rejected. */ persistentResourceId?: string; /** * The ID of the location to store protected artifacts, e.g. us-central1. * Populate only when the location is different from the CustomJob location. List * of supported locations: * https://cloud.google.com/vertex-ai/docs/general/locations */ protectedArtifactLocationId?: string; /** * Optional. A list of names for the reserved ip ranges under the VPC network * that can be used for this job. If set, we will deploy the job within the * provided ip ranges. Otherwise, the job will be deployed to any ip ranges * under the provided VPC network. Example: ['vertex-ai-ip-range']. */ reservedIpRanges?: string[]; /** * Scheduling options for a CustomJob. */ scheduling?: GoogleCloudAiplatformV1Scheduling; /** * Specifies the service account for workload run-as account. Users * submitting jobs must have act-as permission on this run-as account. If * unspecified, the [Vertex AI Custom Code Service * Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) * for the CustomJob's project is used. */ serviceAccount?: string; /** * Optional. The name of a Vertex AI Tensorboard resource to which this * CustomJob will upload Tensorboard logs. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ tensorboard?: string; /** * Required. The spec of the worker pools including machine type and Docker * image. 
All worker pools except the first one are optional and can be * skipped by providing an empty value. */ workerPoolSpecs?: GoogleCloudAiplatformV1WorkerPoolSpec[]; } function serializeGoogleCloudAiplatformV1CustomJobSpec(data: any): GoogleCloudAiplatformV1CustomJobSpec { return { ...data, scheduling: data["scheduling"] !== undefined ? serializeGoogleCloudAiplatformV1Scheduling(data["scheduling"]) : undefined, workerPoolSpecs: data["workerPoolSpecs"] !== undefined ? data["workerPoolSpecs"].map((item: any) => (serializeGoogleCloudAiplatformV1WorkerPoolSpec(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1CustomJobSpec(data: any): GoogleCloudAiplatformV1CustomJobSpec { return { ...data, scheduling: data["scheduling"] !== undefined ? deserializeGoogleCloudAiplatformV1Scheduling(data["scheduling"]) : undefined, workerPoolSpecs: data["workerPoolSpecs"] !== undefined ? data["workerPoolSpecs"].map((item: any) => (deserializeGoogleCloudAiplatformV1WorkerPoolSpec(item))) : undefined, }; } /** * A piece of data in a Dataset. Could be an image, a video, a document or * plain text. */ export interface GoogleCloudAiplatformV1DataItem { /** * Output only. Timestamp when this DataItem was created. */ readonly createTime?: Date; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * DataItems. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * No more than 64 user labels can be associated with one DataItem(System * labels are excluded). See https://goo.gl/xmQnxf for more information and * examples of labels. System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Output only. The resource name of the DataItem. */ readonly name?: string; /** * Required. The data that the DataItem represents (for example, an image or * a text snippet). The schema of the payload is stored in the parent * Dataset's metadata schema's dataItemSchemaUri field. */ payload?: any; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this DataItem was last updated. */ readonly updateTime?: Date; } /** * A container for a single DataItem and Annotations on it. */ export interface GoogleCloudAiplatformV1DataItemView { /** * The Annotations on the DataItem. If too many Annotations should be * returned for the DataItem, this field will be truncated per * annotations_limit in request. If it was, then the has_truncated_annotations * will be set to true. */ annotations?: GoogleCloudAiplatformV1Annotation[]; /** * The DataItem. */ dataItem?: GoogleCloudAiplatformV1DataItem; /** * True if and only if the Annotations field has been truncated. It happens * if more Annotations for this DataItem met the request's annotation_filter * than are allowed to be returned by annotations_limit. Note that if * Annotations field is not being returned due to field mask, then this field * will not be set to true no matter how many Annotations are there. 
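 *
 * A minimal sketch, assuming `view` is a GoogleCloudAiplatformV1DataItemView
 * taken from a search response:
 *
 *     if (view.hasTruncatedAnnotations) {
 *       // Not every matching Annotation was returned; consider raising the
 *       // annotations_limit on the request or listing Annotations directly.
 *     }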
*/ hasTruncatedAnnotations?: boolean; } /** * DataLabelingJob is used to trigger a human labeling job on unlabeled data * from the following Dataset: */ export interface GoogleCloudAiplatformV1DataLabelingJob { /** * Parameters that configure the active learning pipeline. Active learning * will label the data incrementally via several iterations. For every * iteration, it will select a batch of data based on the sampling strategy. */ activeLearningConfig?: GoogleCloudAiplatformV1ActiveLearningConfig; /** * Labels to assign to annotations generated by this DataLabelingJob. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. See https://goo.gl/xmQnxf for * more information and examples of labels. System reserved label keys are * prefixed with "aiplatform.googleapis.com/" and are immutable. */ annotationLabels?: { [key: string]: string }; /** * Output only. Timestamp when this DataLabelingJob was created. */ readonly createTime?: Date; /** * Output only. Estimated cost(in US dollars) that the DataLabelingJob has * incurred to date. */ readonly currentSpend?: GoogleTypeMoney; /** * Required. Dataset resource names. Right now we only support labeling from * a single Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}` */ datasets?: string[]; /** * Required. The user-defined name of the DataLabelingJob. The name can be up * to 128 characters long and can consist of any UTF-8 characters. Display * name of a DataLabelingJob. */ displayName?: string; /** * Customer-managed encryption key spec for a DataLabelingJob. If set, this * DataLabelingJob will be secured by this key. Note: Annotations created in * the DataLabelingJob are associated with the EncryptionSpec of the Dataset * they are exported to. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. DataLabelingJob errors. It is only populated when job's state * is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * Required. Input config parameters for the DataLabelingJob. */ inputs?: any; /** * Required. Points to a YAML file stored on Google Cloud Storage describing * the config for a specific type of DataLabelingJob. The schema files that * can be used here are found in the * https://storage.googleapis.com/google-cloud-aiplatform bucket in the * /schema/datalabelingjob/inputs/ folder. */ inputsSchemaUri?: string; /** * Required. The Google Cloud Storage location of the instruction pdf. This * pdf is shared with labelers, and provides detailed description on how to * label DataItems in Datasets. */ instructionUri?: string; /** * Required. Number of labelers to work on each DataItem. */ labelerCount?: number; /** * Output only. Current labeling job progress percentage scaled in interval * [0, 100], indicating the percentage of DataItems that has been finished. */ readonly labelingProgress?: number; /** * The labels with user-defined metadata to organize your DataLabelingJobs. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. 
Following system labels exist for each DataLabelingJob: * * "aiplatform.googleapis.com/schema": output only, its value is the * inputs_schema's title. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the DataLabelingJob. */ readonly name?: string; /** * The SpecialistPools' resource names associated with this job. */ specialistPools?: string[]; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Output only. Timestamp when this DataLabelingJob was updated most * recently. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1DataLabelingJob(data: any): GoogleCloudAiplatformV1DataLabelingJob { return { ...data, activeLearningConfig: data["activeLearningConfig"] !== undefined ? serializeGoogleCloudAiplatformV1ActiveLearningConfig(data["activeLearningConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DataLabelingJob(data: any): GoogleCloudAiplatformV1DataLabelingJob { return { ...data, activeLearningConfig: data["activeLearningConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ActiveLearningConfig(data["activeLearningConfig"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, currentSpend: data["currentSpend"] !== undefined ? deserializeGoogleTypeMoney(data["currentSpend"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * A collection of DataItems and Annotations on them. */ export interface GoogleCloudAiplatformV1Dataset { /** * Output only. Timestamp when this Dataset was created. */ readonly createTime?: Date; /** * Output only. The number of DataItems in this Dataset. Only apply for * non-structured Dataset. */ readonly dataItemCount?: bigint; /** * The description of the Dataset. */ description?: string; /** * Required. The user-defined name of the Dataset. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for a Dataset. If set, this Dataset * and all sub-resources of this Dataset will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Datasets. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Dataset (System labels are excluded). See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. Following system labels exist for each Dataset: * * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value * is the metadata_schema's title. */ labels?: { [key: string]: string }; /** * Required. Additional information about the Dataset. */ metadata?: any; /** * Output only. 
The resource name of the Artifact that was created in * MetadataStore when creating the Dataset. The Artifact resource name pattern * is * `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. */ readonly metadataArtifact?: string; /** * Required. Points to a YAML file stored on Google Cloud Storage describing * additional information about the Dataset. The schema is defined as an * OpenAPI 3.0.2 Schema Object. The schema files that can be used here are * found in gs://google-cloud-aiplatform/schema/dataset/metadata/. */ metadataSchemaUri?: string; /** * Optional. Reference to the public base model last used by the dataset. * Only set for prompt datasets. */ modelReference?: string; /** * Output only. Identifier. The resource name of the Dataset. */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * All SavedQueries belonging to the Dataset will be returned in the List/Get * Dataset response. The annotation_specs field will not be populated except * for UI cases which will only use annotation_spec_count. In a CreateDataset * request, a SavedQuery is created together with the Dataset if this field is set; up to one * SavedQuery can be set in CreateDatasetRequest. The SavedQuery should not * contain any AnnotationSpec. */ savedQueries?: GoogleCloudAiplatformV1SavedQuery[]; /** * Output only. Timestamp when this Dataset was last updated. */ readonly updateTime?: Date; } /** * Describes the dataset version. */ export interface GoogleCloudAiplatformV1DatasetVersion { /** * Output only. Name of the associated BigQuery dataset. */ readonly bigQueryDatasetName?: string; /** * Output only. Timestamp when this DatasetVersion was created. */ readonly createTime?: Date; /** * The user-defined name of the DatasetVersion. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * Required. Output only. Additional information about the DatasetVersion. */ readonly metadata?: any; /** * Output only. Reference to the public base model last used by the dataset * version. Only set for prompt dataset versions. */ readonly modelReference?: string; /** * Output only. Identifier. The resource name of the DatasetVersion. */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this DatasetVersion was last updated. */ readonly updateTime?: Date; } /** * A description of resources that are dedicated to a DeployedModel, and that * need a higher degree of manual configuration. */ export interface GoogleCloudAiplatformV1DedicatedResources { /** * Immutable. The metric specifications that override a resource utilization * metric (CPU utilization, accelerator's duty cycle, and so on) target value * (defaults to 60 if not set). At most one entry is allowed per metric. If * machine_spec.accelerator_count is above 0, the autoscaling will be based on * both CPU utilization and accelerator's duty cycle metrics, scaling up when * either metric exceeds its target value and scaling down when both metrics * are under their target values. The default target value is 60 for both * metrics. 
If machine_spec.accelerator_count is 0, the autoscaling will be * based on CPU utilization metric only with default target value 60 if not * explicitly set. For example, in the case of Online Prediction, if you want * to override target CPU utilization to 80, you should set * autoscaling_metric_specs.metric_name to * `aiplatform.googleapis.com/prediction/online/cpu/utilization` and * autoscaling_metric_specs.target to `80`. */ autoscalingMetricSpecs?: GoogleCloudAiplatformV1AutoscalingMetricSpec[]; /** * Required. Immutable. The specification of a single machine used by the * prediction. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Immutable. The maximum number of replicas this DeployedModel may be * deployed on when the traffic against it increases. If the requested value * is too large, the deployment will error, but if deployment succeeds then * the ability to scale the model to that many replicas is guaranteed (barring * service outages). If traffic against the DeployedModel increases beyond * what its replicas at maximum may handle, a portion of the traffic will be * dropped. If this value is not provided, will use min_replica_count as the * default value. The value of this field impacts the charge against Vertex * CPU and GPU quotas. Specifically, you will be charged for * (max_replica_count * number of cores in the selected machine type) and * (max_replica_count * number of GPUs per replica in the selected machine * type). */ maxReplicaCount?: number; /** * Required. Immutable. The minimum number of machine replicas this * DeployedModel will be always deployed on. This value must be greater than * or equal to 1. If traffic against the DeployedModel increases, it may * dynamically be deployed onto more replicas, and as traffic decreases, some * of these extra replicas may be freed. */ minReplicaCount?: number; /** * Optional. Number of required available replicas for the deployment to * succeed. This field is only needed when partial model deployment/mutation * is desired. If set, the model deploy/mutate operation will succeed once * available_replica_count reaches required_replica_count, and the rest of the * replicas will be retried. If not set, the default required_replica_count * will be min_replica_count. */ requiredReplicaCount?: number; /** * Optional. If true, schedule the deployment workload on [spot * VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). */ spot?: boolean; } /** * Details of operations that delete Feature values. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesOperationMetadata { /** * Operation metadata for Featurestore delete Features values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.DeleteFeatureValues. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesRequest { /** * Select feature values to be deleted by specifying entities. */ selectEntity?: GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity; /** * Select feature values to be deleted by specifying time range and features. */ selectTimeRangeAndFeature?: GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequest(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequest { return { ...data, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? 
serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesRequest(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequest { return { ...data, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? deserializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } /** * Message to select entity. If an entity id is selected, all the feature * values corresponding to the entity id will be deleted, including the * entityId. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity { /** * Required. Selectors choosing feature values of which entity id to be * deleted from the EntityType. */ entityIdSelector?: GoogleCloudAiplatformV1EntityIdSelector; } /** * Message to select time range and feature. Values of the selected feature * generated within an inclusive time range will be deleted. Using this option * permanently deletes the feature values from the specified feature IDs within * the specified time range. This might include data from the online storage. If * you want to retain any deleted historical data in the online storage, you * must re-ingest it. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { /** * Required. Selectors choosing which feature values to be deleted from the * EntityType. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; /** * If set, data will not be deleted from online storage. When time range is * older than the data in online storage, setting this to be true will make * the deletion have no impact on online serving. */ skipOnlineStorageDelete?: boolean; /** * Required. Select feature generated within a half-inclusive time range. The * time range is lower inclusive and upper exclusive. */ timeRange?: GoogleTypeInterval; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { return { ...data, timeRange: data["timeRange"] !== undefined ? serializeGoogleTypeInterval(data["timeRange"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { return { ...data, timeRange: data["timeRange"] !== undefined ? deserializeGoogleTypeInterval(data["timeRange"]) : undefined, }; } /** * Response message for FeaturestoreService.DeleteFeatureValues. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesResponse { /** * Response for request specifying the entities to delete */ selectEntity?: GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity; /** * Response for request specifying time range and feature */ selectTimeRangeAndFeature?: GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponse(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponse { return { ...data, selectEntity: data["selectEntity"] !== undefined ? serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data["selectEntity"]) : undefined, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? 
serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponse(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponse { return { ...data, selectEntity: data["selectEntity"] !== undefined ? deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data["selectEntity"]) : undefined, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } /** * Response message if the request uses the SelectEntity option. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { /** * The count of deleted entity rows in the offline storage. Each row * corresponds to the combination of an entity ID and a timestamp. One entity * ID can have multiple rows in the offline storage. */ offlineStorageDeletedEntityRowCount?: bigint; /** * The count of deleted entities in the online storage. Each entity ID * corresponds to one entity. */ onlineStorageDeletedEntityCount?: bigint; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { return { ...data, offlineStorageDeletedEntityRowCount: data["offlineStorageDeletedEntityRowCount"] !== undefined ? String(data["offlineStorageDeletedEntityRowCount"]) : undefined, onlineStorageDeletedEntityCount: data["onlineStorageDeletedEntityCount"] !== undefined ? String(data["onlineStorageDeletedEntityCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { return { ...data, offlineStorageDeletedEntityRowCount: data["offlineStorageDeletedEntityRowCount"] !== undefined ? BigInt(data["offlineStorageDeletedEntityRowCount"]) : undefined, onlineStorageDeletedEntityCount: data["onlineStorageDeletedEntityCount"] !== undefined ? BigInt(data["onlineStorageDeletedEntityCount"]) : undefined, }; } /** * Response message if the request uses the SelectTimeRangeAndFeature option. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { /** * The count of the features or columns impacted. This is the same as the * feature count in the request. */ impactedFeatureCount?: bigint; /** * The count of modified entity rows in the offline storage. Each row * corresponds to the combination of an entity ID and a timestamp. One entity * ID can have multiple rows in the offline storage. Within each row, only the * features specified in the request are deleted. */ offlineStorageModifiedEntityRowCount?: bigint; /** * The count of modified entities in the online storage. Each entity ID * corresponds to one entity. Within each entity, only the features specified * in the request are deleted. */ onlineStorageModifiedEntityCount?: bigint; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { return { ...data, impactedFeatureCount: data["impactedFeatureCount"] !== undefined ? String(data["impactedFeatureCount"]) : undefined, offlineStorageModifiedEntityRowCount: data["offlineStorageModifiedEntityRowCount"] !== undefined ? 
String(data["offlineStorageModifiedEntityRowCount"]) : undefined, onlineStorageModifiedEntityCount: data["onlineStorageModifiedEntityCount"] !== undefined ? String(data["onlineStorageModifiedEntityCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { return { ...data, impactedFeatureCount: data["impactedFeatureCount"] !== undefined ? BigInt(data["impactedFeatureCount"]) : undefined, offlineStorageModifiedEntityRowCount: data["offlineStorageModifiedEntityRowCount"] !== undefined ? BigInt(data["offlineStorageModifiedEntityRowCount"]) : undefined, onlineStorageModifiedEntityCount: data["onlineStorageModifiedEntityCount"] !== undefined ? BigInt(data["onlineStorageModifiedEntityCount"]) : undefined, }; } /** * Details of operations that perform MetadataService.DeleteMetadataStore. */ export interface GoogleCloudAiplatformV1DeleteMetadataStoreOperationMetadata { /** * Operation metadata for deleting a MetadataStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform deletes of any entities. */ export interface GoogleCloudAiplatformV1DeleteOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * A deployment of an Index. IndexEndpoints contain one or more * DeployedIndexes. */ export interface GoogleCloudAiplatformV1DeployedIndex { /** * Optional. A description of resources that the DeployedIndex uses, which to * large degree are decided by Vertex AI, and optionally allows only a modest * additional configuration. If min_replica_count is not set, the default * value is 2 (we don't provide SLA when min_replica_count=1). If * max_replica_count is not set, the default value is min_replica_count. The * max allowed replica count is 1000. */ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; /** * Output only. Timestamp when the DeployedIndex was created. */ readonly createTime?: Date; /** * Optional. A description of resources that are dedicated to the * DeployedIndex, and that need a higher degree of manual configuration. The * field min_replica_count must be set to a value strictly greater than 0, or * else validation will fail. We don't provide SLA when min_replica_count=1. * If max_replica_count is not set, the default value is min_replica_count. * The max allowed replica count is 1000. Available machine types for SMALL * shard: e2-standard-2 and all machine types available for MEDIUM and LARGE * shard. Available machine types for MEDIUM shard: e2-standard-16 and all * machine types available for LARGE shard. Available machine types for LARGE * shard: e2-highmem-16, n2d-standard-32. n1-standard-16 and n1-standard-32 * are still available, but we recommend e2-standard-16 and e2-highmem-16 for * cost efficiency. */ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * Optional. If set, the authentication is enabled for the private endpoint. */ deployedIndexAuthConfig?: GoogleCloudAiplatformV1DeployedIndexAuthConfig; /** * Optional. The deployment group can be no longer than 64 characters (eg: * 'test', 'prod'). If not set, we will use the 'default' deployment group. * Creating `deployment_groups` with `reserved_ip_ranges` is a recommended * practice when the peered network has multiple peering ranges. 
This creates * your deployments from predictable IP spaces for easier traffic * administration. Also, one deployment_group (except 'default') can only be * used with the same reserved_ip_ranges which means if the deployment_group * has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or * [d, e] is disallowed. Note: we only support up to 5 deployment groups(not * including 'default'). */ deploymentGroup?: string; /** * The display name of the DeployedIndex. If not provided upon creation, the * Index's display_name is used. */ displayName?: string; /** * Optional. If true, private endpoint's access logs are sent to Cloud * Logging. These logs are like standard server access logs, containing * information like timestamp and latency for each MatchRequest. Note that * logs may incur a cost, especially if the deployed index receives a high * queries per second rate (QPS). Estimate your costs before enabling this * option. */ enableAccessLogging?: boolean; /** * Required. The user specified ID of the DeployedIndex. The ID can be up to * 128 characters long and must start with a letter and only contain letters, * numbers, and underscores. The ID must be unique within the project it is * created in. */ id?: string; /** * Required. The name of the Index this is the deployment of. We may refer to * this Index as the DeployedIndex's "original" Index. */ index?: string; /** * Output only. The DeployedIndex may depend on various data on its original * Index. Additionally when certain changes to the original Index are being * done (e.g. when what the Index contains is being changed) the DeployedIndex * may be asynchronously updated in the background to reflect these changes. * If this timestamp's value is at least the Index.update_time of the original * Index, it means that this DeployedIndex and the original Index are in sync. * If this timestamp is older, then to see which updates this DeployedIndex * already contains (and which it does not), one must list the operations that * are running on the original Index. Only the successfully completed * Operations with update_time equal or before this sync time are contained in * this DeployedIndex. */ readonly indexSyncTime?: Date; /** * Output only. Provides paths for users to send requests directly to the * deployed index services running on Cloud via private services access. This * field is populated if network is configured. */ readonly privateEndpoints?: GoogleCloudAiplatformV1IndexPrivateEndpoints; /** * Optional. If set for PSC deployed index, PSC connection will be * automatically created after deployment is done and the endpoint information * is populated in private_endpoints.psc_automated_endpoints. */ pscAutomationConfigs?: GoogleCloudAiplatformV1PSCAutomationConfig[]; /** * Optional. A list of reserved ip ranges under the VPC network that can be * used for this DeployedIndex. If set, we will deploy the index within the * provided ip ranges. Otherwise, the index might be deployed to any ip ranges * under the provided VPC network. The value should be the name of the address * (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) * Example: ['vertex-ai-ip-range']. For more information about subnets and * network IP ranges, please see * https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. */ reservedIpRanges?: string[]; } /** * Used to set up the auth on the DeployedIndex's private endpoint. 
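 *
 * A minimal, non-authoritative sketch of such an auth config (the service
 * account and audience values below are purely illustrative):
 *
 *     const authConfig: GoogleCloudAiplatformV1DeployedIndexAuthConfig = {
 *       authProvider: {
 *         allowedIssuers: ["my-sa@my-project.iam.gserviceaccount.com"],
 *         audiences: ["my-audience"],
 *       },
 *     };
 *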
*/ export interface GoogleCloudAiplatformV1DeployedIndexAuthConfig { /** * Defines the authentication provider that the DeployedIndex uses. */ authProvider?: GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider; } /** * Configuration for an authentication provider, including support for [JSON * Web Token * (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). */ export interface GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider { /** * A list of allowed JWT issuers. Each entry must be a valid Google service * account, in the following format: * `service-account-name@project-id.iam.gserviceaccount.com` */ allowedIssuers?: string[]; /** * The list of JWT * [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). * that are allowed to access. A JWT containing any of these audiences will be * accepted. */ audiences?: string[]; } /** * Points to a DeployedIndex. */ export interface GoogleCloudAiplatformV1DeployedIndexRef { /** * Immutable. The ID of the DeployedIndex in the above IndexEndpoint. */ deployedIndexId?: string; /** * Output only. The display name of the DeployedIndex. */ readonly displayName?: string; /** * Immutable. A resource name of the IndexEndpoint. */ indexEndpoint?: string; } /** * A deployment of a Model. Endpoints contain one or more DeployedModels. */ export interface GoogleCloudAiplatformV1DeployedModel { /** * A description of resources that to large degree are decided by Vertex AI, * and require only a modest additional configuration. */ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; /** * Output only. Timestamp when the DeployedModel was created. */ readonly createTime?: Date; /** * A description of resources that are dedicated to the DeployedModel, and * that need a higher degree of manual configuration. */ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * For custom-trained Models and AutoML Tabular Models, the container of the * DeployedModel instances will send `stderr` and `stdout` streams to Cloud * Logging by default. Please note that the logs incur cost, which are subject * to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User * can disable container logging by setting this flag to true. */ disableContainerLogging?: boolean; /** * If true, deploy the model without explainable feature, regardless the * existence of Model.explanation_spec or explanation_spec. */ disableExplanations?: boolean; /** * The display name of the DeployedModel. If not provided upon creation, the * Model's display_name is used. */ displayName?: string; /** * If true, online prediction access logs are sent to Cloud Logging. These * logs are like standard server access logs, containing information like * timestamp and latency for each prediction request. Note that logs may incur * a cost, especially if your project receives prediction requests at a high * queries per second rate (QPS). Estimate your costs before enabling this * option. */ enableAccessLogging?: boolean; /** * Explanation configuration for this DeployedModel. When deploying a Model * using EndpointService.DeployModel, this value overrides the value of * Model.explanation_spec. All fields of explanation_spec are optional in the * request. If a field of explanation_spec is not populated, the value of the * same field of Model.explanation_spec is inherited. If the corresponding * Model.explanation_spec is not populated, all fields of the explanation_spec * will be used for the explanation configuration. 
*/ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * Configuration for faster model deployment. */ fasterDeploymentConfig?: GoogleCloudAiplatformV1FasterDeploymentConfig; /** * Immutable. The ID of the DeployedModel. If not provided upon deployment, * Vertex AI will generate a value for this ID. This value should be 1-10 * characters, and valid characters are `/[0-9]/`. */ id?: string; /** * Required. The resource name of the Model that this is the deployment of. * Note that the Model may be in a different location than the DeployedModel's * Endpoint. The resource name may contain version id or version alias to * specify the version. Example: * `projects/{project}/locations/{location}/models/{model}@2` or * `projects/{project}/locations/{location}/models/{model}@golden` if no * version is specified, the default version will be deployed. */ model?: string; /** * Output only. The version ID of the model that is deployed. */ readonly modelVersionId?: string; /** * Output only. Provide paths for users to send predict/explain/health * requests directly to the deployed model services running on Cloud via * private services access. This field is populated if network is configured. */ readonly privateEndpoints?: GoogleCloudAiplatformV1PrivateEndpoints; /** * The service account that the DeployedModel's container runs as. Specify * the email address of the service account. If this service account is not * specified, the container runs as a service account that doesn't have access * to the resource project. Users deploying the Model must have the * `iam.serviceAccounts.actAs` permission on this service account. */ serviceAccount?: string; /** * The resource name of the shared DeploymentResourcePool to deploy on. * Format: * `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ sharedResources?: string; /** * Output only. Runtime status of the deployed model. */ readonly status?: GoogleCloudAiplatformV1DeployedModelStatus; /** * System labels to apply to Model Garden deployments. System labels are * managed by Google for internal use only. */ systemLabels?: { [key: string]: string }; } /** * Points to a DeployedModel. */ export interface GoogleCloudAiplatformV1DeployedModelRef { /** * Immutable. An ID of a DeployedModel in the above Endpoint. */ deployedModelId?: string; /** * Immutable. A resource name of an Endpoint. */ endpoint?: string; } /** * Runtime status of the deployed model. */ export interface GoogleCloudAiplatformV1DeployedModelStatus { /** * Output only. The number of available replicas of the deployed model. */ readonly availableReplicaCount?: number; /** * Output only. The time at which the status was last updated. */ readonly lastUpdateTime?: Date; /** * Output only. The latest deployed model's status message (if any). */ readonly message?: string; } /** * Runtime operation information for IndexEndpointService.DeployIndex. */ export interface GoogleCloudAiplatformV1DeployIndexOperationMetadata { /** * The unique index id specified by user */ deployedIndexId?: string; /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for IndexEndpointService.DeployIndex. */ export interface GoogleCloudAiplatformV1DeployIndexRequest { /** * Required. The DeployedIndex to be created within the IndexEndpoint. */ deployedIndex?: GoogleCloudAiplatformV1DeployedIndex; } /** * Response message for IndexEndpointService.DeployIndex. 
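 *
 * As a usage sketch for the request message documented just above (all
 * resource names and IDs are hypothetical, not taken from a real project):
 *
 *     const deployIndexReq: GoogleCloudAiplatformV1DeployIndexRequest = {
 *       deployedIndex: {
 *         id: "my_deployed_index_1",
 *         index: "projects/my-project/locations/us-central1/indexes/1234567890",
 *         displayName: "ann-index-prod",
 *         deploymentGroup: "prod",
 *         reservedIpRanges: ["vertex-ai-ip-range"],
 *       },
 *     };
 *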
*/ export interface GoogleCloudAiplatformV1DeployIndexResponse { /** * The DeployedIndex that had been deployed in the IndexEndpoint. */ deployedIndex?: GoogleCloudAiplatformV1DeployedIndex; } /** * A description of resources that can be shared by multiple DeployedModels, * whose underlying specification consists of a DedicatedResources. */ export interface GoogleCloudAiplatformV1DeploymentResourcePool { /** * Output only. Timestamp when this DeploymentResourcePool was created. */ readonly createTime?: Date; /** * Required. The underlying DedicatedResources that the * DeploymentResourcePool uses. */ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * If the DeploymentResourcePool is deployed with custom-trained Models or * AutoML Tabular Models, the container(s) of the DeploymentResourcePool will * send `stderr` and `stdout` streams to Cloud Logging by default. Please note * that the logs incur cost, which are subject to [Cloud Logging * pricing](https://cloud.google.com/logging/pricing). User can disable * container logging by setting this flag to true. */ disableContainerLogging?: boolean; /** * Customer-managed encryption key spec for a DeploymentResourcePool. If set, * this DeploymentResourcePool will be secured by this key. Endpoints and the * DeploymentResourcePool they deploy in need to have the same EncryptionSpec. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Immutable. The resource name of the DeploymentResourcePool. Format: * `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * The service account that the DeploymentResourcePool's container(s) run as. * Specify the email address of the service account. If this service account * is not specified, the container(s) run as a service account that doesn't * have access to the resource project. Users deploying the Models to this * DeploymentResourcePool must have the `iam.serviceAccounts.actAs` permission * on this service account. */ serviceAccount?: string; } /** * Runtime operation information for EndpointService.DeployModel. */ export interface GoogleCloudAiplatformV1DeployModelOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for EndpointService.DeployModel. */ export interface GoogleCloudAiplatformV1DeployModelRequest { /** * Required. The DeployedModel to be created within the Endpoint. Note that * Endpoint.traffic_split must be updated for the DeployedModel to start * receiving traffic, either as part of this call, or via * EndpointService.UpdateEndpoint. */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; /** * A map from a DeployedModel's ID to the percentage of this Endpoint's * traffic that should be forwarded to that DeployedModel. If this field is * non-empty, then the Endpoint's traffic_split will be overwritten with it. * To refer to the ID of the just being deployed Model, a "0" should be used, * and the actual ID of the new DeployedModel will be filled in its place by * this method. The traffic percentage values must add up to 100. If this * field is empty, then the Endpoint's traffic_split is not updated. */ trafficSplit?: { [key: string]: number }; } /** * Response message for EndpointService.DeployModel. 
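 *
 * As a usage sketch for the request message documented just above (the model
 * resource name and the existing DeployedModel ID "1234567890" are
 * hypothetical): "0" refers to the DeployedModel being created by this call,
 * and the traffic percentages must add up to 100.
 *
 *     const deployModelReq: GoogleCloudAiplatformV1DeployModelRequest = {
 *       deployedModel: {
 *         model: "projects/my-project/locations/us-central1/models/my-model",
 *         displayName: "my-model-deployment",
 *       },
 *       trafficSplit: { "0": 20, "1234567890": 80 },
 *     };
 *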
*/ export interface GoogleCloudAiplatformV1DeployModelResponse { /** * The DeployedModel that had been deployed in the Endpoint. */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; } export interface GoogleCloudAiplatformV1DestinationFeatureSetting { /** * Specify the field name in the export destination. If not specified, * Feature ID is used. */ destinationField?: string; /** * Required. The ID of the Feature to apply the setting to. */ featureId?: string; } /** * Request message for PredictionService.DirectPredict. */ export interface GoogleCloudAiplatformV1DirectPredictRequest { /** * The prediction input. */ inputs?: GoogleCloudAiplatformV1Tensor[]; /** * The parameters that govern the prediction. */ parameters?: GoogleCloudAiplatformV1Tensor; } function serializeGoogleCloudAiplatformV1DirectPredictRequest(data: any): GoogleCloudAiplatformV1DirectPredictRequest { return { ...data, inputs: data["inputs"] !== undefined ? data["inputs"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectPredictRequest(data: any): GoogleCloudAiplatformV1DirectPredictRequest { return { ...data, inputs: data["inputs"] !== undefined ? data["inputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } /** * Response message for PredictionService.DirectPredict. */ export interface GoogleCloudAiplatformV1DirectPredictResponse { /** * The prediction output. */ outputs?: GoogleCloudAiplatformV1Tensor[]; /** * The parameters that govern the prediction. */ parameters?: GoogleCloudAiplatformV1Tensor; } function serializeGoogleCloudAiplatformV1DirectPredictResponse(data: any): GoogleCloudAiplatformV1DirectPredictResponse { return { ...data, outputs: data["outputs"] !== undefined ? data["outputs"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectPredictResponse(data: any): GoogleCloudAiplatformV1DirectPredictResponse { return { ...data, outputs: data["outputs"] !== undefined ? data["outputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } /** * Request message for PredictionService.DirectRawPredict. */ export interface GoogleCloudAiplatformV1DirectRawPredictRequest { /** * The prediction input. */ input?: Uint8Array; /** * Fully qualified name of the API method being invoked to perform * predictions. Format: `/namespace.Service/Method/` Example: * `/tensorflow.serving.PredictionService/Predict` */ methodName?: string; } function serializeGoogleCloudAiplatformV1DirectRawPredictRequest(data: any): GoogleCloudAiplatformV1DirectRawPredictRequest { return { ...data, input: data["input"] !== undefined ? encodeBase64(data["input"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectRawPredictRequest(data: any): GoogleCloudAiplatformV1DirectRawPredictRequest { return { ...data, input: data["input"] !== undefined ? 
decodeBase64(data["input"] as string) : undefined, }; } /** * Response message for PredictionService.DirectRawPredict. */ export interface GoogleCloudAiplatformV1DirectRawPredictResponse { /** * The prediction output. */ output?: Uint8Array; } function serializeGoogleCloudAiplatformV1DirectRawPredictResponse(data: any): GoogleCloudAiplatformV1DirectRawPredictResponse { return { ...data, output: data["output"] !== undefined ? encodeBase64(data["output"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectRawPredictResponse(data: any): GoogleCloudAiplatformV1DirectRawPredictResponse { return { ...data, output: data["output"] !== undefined ? decodeBase64(data["output"] as string) : undefined, }; } /** * The input content is encapsulated and uploaded in the request. */ export interface GoogleCloudAiplatformV1DirectUploadSource { } /** * Represents the spec of disk options. */ export interface GoogleCloudAiplatformV1DiskSpec { /** * Size in GB of the boot disk (default is 100GB). */ bootDiskSizeGb?: number; /** * Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" * (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard * Disk Drive). */ bootDiskType?: string; } /** * A list of double values. */ export interface GoogleCloudAiplatformV1DoubleArray { /** * A list of double values. */ values?: number[]; } /** * Describes the options to customize dynamic retrieval. */ export interface GoogleCloudAiplatformV1DynamicRetrievalConfig { /** * Optional. The threshold to be used in dynamic retrieval. If not set, a * system default value is used. */ dynamicThreshold?: number; /** * The mode of the predictor to be used in dynamic retrieval. */ mode?: | "MODE_UNSPECIFIED" | "MODE_DYNAMIC"; } /** * Represents a customer-managed encryption key spec that can be applied to a * top-level resource. */ export interface GoogleCloudAiplatformV1EncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed * encryption key used to protect a resource. Has the form: * `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. * The key needs to be in the same region as where the compute resource is * created. */ kmsKeyName?: string; } /** * Models are deployed into it, and afterwards Endpoint is called to obtain * predictions and explanations. */ export interface GoogleCloudAiplatformV1Endpoint { /** * Configurations that are applied to the endpoint for online prediction. */ clientConnectionConfig?: GoogleCloudAiplatformV1ClientConnectionConfig; /** * Output only. Timestamp when this Endpoint was created. */ readonly createTime?: Date; /** * Output only. DNS of the dedicated endpoint. Will only be populated if * dedicated_endpoint_enabled is true. Format: * `https://{endpoint_id}.{region}-{project_number}.prediction.vertexai.goog`. */ readonly dedicatedEndpointDns?: string; /** * If true, the endpoint will be exposed through a dedicated DNS * [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will * be isolated from other users' traffic and will have better performance and * reliability. Note: Once you enabled dedicated endpoint, you won't be able * to send request to the shared DNS {region}-aiplatform.googleapis.com. The * limitation will be removed soon. */ dedicatedEndpointEnabled?: boolean; /** * Output only. The models deployed in this Endpoint. To add or remove * DeployedModels use EndpointService.DeployModel and * EndpointService.UndeployModel respectively. 
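 *
 * (Aside on the DirectRawPredict types defined earlier in this file: the
 * request carries raw bytes, and the module-internal
 * serializeGoogleCloudAiplatformV1DirectRawPredictRequest function
 * base64-encodes them before sending. A hedged sketch, with a payload and
 * method name that are only illustrative:
 *
 *     const rawReq: GoogleCloudAiplatformV1DirectRawPredictRequest = {
 *       methodName: "/tensorflow.serving.PredictionService/Predict",
 *       input: new TextEncoder().encode('{"instances": [[1, 2, 3]]}'),
 *     };
 *
 * )
 *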
*/ readonly deployedModels?: GoogleCloudAiplatformV1DeployedModel[]; /** * The description of the Endpoint. */ description?: string; /** * Required. The display name of the Endpoint. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Deprecated: If true, expose the Endpoint via private service connect. Only * one of the fields, network or enable_private_service_connect, can be set. */ enablePrivateServiceConnect?: boolean; /** * Customer-managed encryption key spec for an Endpoint. If set, this * Endpoint and all sub-resources of this Endpoint will be secured by this * key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Endpoints. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. See https://goo.gl/xmQnxf for * more information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the Model Monitoring job associated with * this Endpoint if monitoring is enabled by * JobService.CreateModelDeploymentMonitoringJob. Format: * `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ readonly modelDeploymentMonitoringJob?: string; /** * Output only. The resource name of the Endpoint. */ readonly name?: string; /** * Optional. The full name of the Google Compute Engine * [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) * to which the Endpoint should be peered. Private services access must * already be configured for the network. If left unspecified, the Endpoint is * not peered with any network. Only one of the fields, network or * enable_private_service_connect, can be set. * [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): * `projects/{project}/global/networks/{network}`. Where `{project}` is a * project number, as in `12345`, and `{network}` is network name. */ network?: string; /** * Configures the request-response logging for online prediction. */ predictRequestResponseLoggingConfig?: GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig; /** * Optional. Configuration for private service connect. network and * private_service_connect_config are mutually exclusive. */ privateServiceConnectConfig?: GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * A map from a DeployedModel's ID to the percentage of this Endpoint's * traffic that should be forwarded to that DeployedModel. If a * DeployedModel's ID is not listed in this map, then it receives no traffic. * The traffic percentage values must add up to 100, or map must be empty if * the Endpoint is to not accept any traffic at a moment. */ trafficSplit?: { [key: string]: number }; /** * Output only. Timestamp when this Endpoint was last updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1Endpoint(data: any): GoogleCloudAiplatformV1Endpoint { return { ...data, clientConnectionConfig: data["clientConnectionConfig"] !== undefined ? 
serializeGoogleCloudAiplatformV1ClientConnectionConfig(data["clientConnectionConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Endpoint(data: any): GoogleCloudAiplatformV1Endpoint { return { ...data, clientConnectionConfig: data["clientConnectionConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ClientConnectionConfig(data["clientConnectionConfig"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Selector for entityId. Getting ids from the given source. */ export interface GoogleCloudAiplatformV1EntityIdSelector { /** * Source of Csv */ csvSource?: GoogleCloudAiplatformV1CsvSource; /** * Source column that holds entity IDs. If not provided, entity IDs are * extracted from the column named entity_id. */ entityIdField?: string; } /** * An entity type is a type of object in a system that needs to be modeled and * have stored information about. For example, driver is an entity type, and * driver0 is an instance of an entity type driver. */ export interface GoogleCloudAiplatformV1EntityType { /** * Output only. Timestamp when this EntityType was created. */ readonly createTime?: Date; /** * Optional. Description of the EntityType. */ description?: string; /** * Optional. Used to perform a consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * EntityTypes. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one EntityType (System * labels are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Optional. The default monitoring configuration for all Features with value * type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 under this * EntityType. If this is populated with * [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot * analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is * disabled. */ monitoringConfig?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfig; /** * Immutable. Name of the EntityType. Format: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` * The last part entity_type is assigned by the client. The entity_type can be * up to 64 characters long and can consist only of ASCII Latin letters A-Z * and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The * value will be unique given a featurestore. */ name?: string; /** * Optional. Config for data retention policy in offline storage. TTL in days * for feature values that will be stored in offline storage. The Feature * Store offline storage periodically removes obsolete feature values older * than `offline_storage_ttl_days` since the feature generation time. If unset * (or explicitly set to 0), default to 4000 days TTL. */ offlineStorageTtlDays?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. 
Timestamp when this EntityType was most recently updated. */ readonly updateTime?: Date; } /** * Represents an environment variable present in a Container or Python Module. */ export interface GoogleCloudAiplatformV1EnvVar { /** * Required. Name of the environment variable. Must be a valid C identifier. */ name?: string; /** * Required. Variables that reference a $(VAR_NAME) are expanded using the * previously defined environment variables in the container and any service * environment variables. If a variable cannot be resolved, the reference in * the input string will be unchanged. The $(VAR_NAME) syntax can be escaped * with a double $$, i.e. $$(VAR_NAME). Escaped references will never be * expanded, regardless of whether the variable exists or not. */ value?: string; } /** * Model error analysis for each annotation. */ export interface GoogleCloudAiplatformV1ErrorAnalysisAnnotation { /** * Attributed items for a given annotation, typically representing neighbors * from the training sets constrained by the query type. */ attributedItems?: GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem[]; /** * The outlier score of this annotated item. Usually defined as the min of * all distances from attributed items. */ outlierScore?: number; /** * The threshold used to determine if this annotation is an outlier or not. */ outlierThreshold?: number; /** * The query type used for finding the attributed items. */ queryType?: | "QUERY_TYPE_UNSPECIFIED" | "ALL_SIMILAR" | "SAME_CLASS_SIMILAR" | "SAME_CLASS_DISSIMILAR"; } /** * Attributed items for a given annotation, typically representing neighbors * from the training sets constrained by the query type. */ export interface GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem { /** * The unique ID for each annotation. Used by FE to allocate the annotation * in DB. */ annotationResourceName?: string; /** * The distance of this item to the annotation. */ distance?: number; } /** * True positive, false positive, or false negative. EvaluatedAnnotation is * only available under ModelEvaluationSlice with slice of `annotationSpec` * dimension. */ export interface GoogleCloudAiplatformV1EvaluatedAnnotation { /** * Output only. The data item payload that the Model predicted this * EvaluatedAnnotation on. */ readonly dataItemPayload?: any; /** * Annotations of model error analysis results. */ errorAnalysisAnnotations?: GoogleCloudAiplatformV1ErrorAnalysisAnnotation[]; /** * Output only. ID of the EvaluatedDataItemView under the same ancestor * ModelEvaluation. The EvaluatedDataItemView consists of all ground truths * and predictions on data_item_payload. */ readonly evaluatedDataItemViewId?: string; /** * Explanations of predictions. Each element of the explanations indicates * the explanation for one explanation Method. The attributions list in the * EvaluatedAnnotationExplanation.explanation object corresponds to the * predictions list. For example, the second element in the attributions list * explains the second element in the predictions list. */ explanations?: GoogleCloudAiplatformV1EvaluatedAnnotationExplanation[]; /** * Output only. The ground truth Annotations, i.e. the Annotations that exist * in the test data the Model is evaluated on. For true positive, there is one * and only one ground truth annotation, which matches the only prediction in * predictions. For false positive, there are zero or more ground truth * annotations that are similar to the only prediction in predictions, but not * enough for a match.
For false negative, there is one and only one ground * truth annotation, which doesn't match any predictions created by the model. * The schema of the ground truth is stored in * ModelEvaluation.annotation_schema_uri */ readonly groundTruths?: any[]; /** * Output only. The model predicted annotations. For true positive, there is * one and only one prediction, which matches the only one ground truth * annotation in ground_truths. For false positive, there is one and only one * prediction, which doesn't match any ground truth annotation of the * corresponding data_item_view_id. For false negative, there are zero or more * predictions which are similar to the only ground truth annotation in * ground_truths but not enough for a match. The schema of the prediction is * stored in ModelEvaluation.annotation_schema_uri */ readonly predictions?: any[]; /** * Output only. Type of the EvaluatedAnnotation. */ readonly type?: | "EVALUATED_ANNOTATION_TYPE_UNSPECIFIED" | "TRUE_POSITIVE" | "FALSE_POSITIVE" | "FALSE_NEGATIVE"; } /** * Explanation result of the prediction produced by the Model. */ export interface GoogleCloudAiplatformV1EvaluatedAnnotationExplanation { /** * Explanation attribution response details. */ explanation?: GoogleCloudAiplatformV1Explanation; /** * Explanation type. For AutoML Image Classification models, possible values * are: * `image-integrated-gradients` * `image-xrai` */ explanationType?: string; } /** * Request message for EvaluationService.EvaluateInstances. */ export interface GoogleCloudAiplatformV1EvaluateInstancesRequest { /** * Instances and metric spec for bleu metric. */ bleuInput?: GoogleCloudAiplatformV1BleuInput; /** * Input for coherence metric. */ coherenceInput?: GoogleCloudAiplatformV1CoherenceInput; /** * Translation metrics. Input for Comet metric. */ cometInput?: GoogleCloudAiplatformV1CometInput; /** * Auto metric instances. Instances and metric spec for exact match metric. */ exactMatchInput?: GoogleCloudAiplatformV1ExactMatchInput; /** * LLM-based metric instance. General text generation metrics, applicable to * other categories. Input for fluency metric. */ fluencyInput?: GoogleCloudAiplatformV1FluencyInput; /** * Input for fulfillment metric. */ fulfillmentInput?: GoogleCloudAiplatformV1FulfillmentInput; /** * Input for groundedness metric. */ groundednessInput?: GoogleCloudAiplatformV1GroundednessInput; /** * Input for Metricx metric. */ metricxInput?: GoogleCloudAiplatformV1MetricxInput; /** * Input for pairwise metric. */ pairwiseMetricInput?: GoogleCloudAiplatformV1PairwiseMetricInput; /** * Input for pairwise question answering quality metric. */ pairwiseQuestionAnsweringQualityInput?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInput; /** * Input for pairwise summarization quality metric. */ pairwiseSummarizationQualityInput?: GoogleCloudAiplatformV1PairwiseSummarizationQualityInput; /** * Input for pointwise metric. */ pointwiseMetricInput?: GoogleCloudAiplatformV1PointwiseMetricInput; /** * Input for question answering correctness metric. */ questionAnsweringCorrectnessInput?: GoogleCloudAiplatformV1QuestionAnsweringCorrectnessInput; /** * Input for question answering helpfulness metric. */ questionAnsweringHelpfulnessInput?: GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessInput; /** * Input for question answering quality metric. */ questionAnsweringQualityInput?: GoogleCloudAiplatformV1QuestionAnsweringQualityInput; /** * Input for question answering relevance metric. 
*/ questionAnsweringRelevanceInput?: GoogleCloudAiplatformV1QuestionAnsweringRelevanceInput; /** * Instances and metric spec for rouge metric. */ rougeInput?: GoogleCloudAiplatformV1RougeInput; /** * Input for safety metric. */ safetyInput?: GoogleCloudAiplatformV1SafetyInput; /** * Input for summarization helpfulness metric. */ summarizationHelpfulnessInput?: GoogleCloudAiplatformV1SummarizationHelpfulnessInput; /** * Input for summarization quality metric. */ summarizationQualityInput?: GoogleCloudAiplatformV1SummarizationQualityInput; /** * Input for summarization verbosity metric. */ summarizationVerbosityInput?: GoogleCloudAiplatformV1SummarizationVerbosityInput; /** * Tool call metric instances. Input for tool call valid metric. */ toolCallValidInput?: GoogleCloudAiplatformV1ToolCallValidInput; /** * Input for tool name match metric. */ toolNameMatchInput?: GoogleCloudAiplatformV1ToolNameMatchInput; /** * Input for tool parameter key match metric. */ toolParameterKeyMatchInput?: GoogleCloudAiplatformV1ToolParameterKeyMatchInput; /** * Input for tool parameter key value match metric. */ toolParameterKvMatchInput?: GoogleCloudAiplatformV1ToolParameterKVMatchInput; } /** * Response message for EvaluationService.EvaluateInstances. */ export interface GoogleCloudAiplatformV1EvaluateInstancesResponse { /** * Results for bleu metric. */ bleuResults?: GoogleCloudAiplatformV1BleuResults; /** * Result for coherence metric. */ coherenceResult?: GoogleCloudAiplatformV1CoherenceResult; /** * Translation metrics. Result for Comet metric. */ cometResult?: GoogleCloudAiplatformV1CometResult; /** * Auto metric evaluation results. Results for exact match metric. */ exactMatchResults?: GoogleCloudAiplatformV1ExactMatchResults; /** * LLM-based metric evaluation result. General text generation metrics, * applicable to other categories. Result for fluency metric. */ fluencyResult?: GoogleCloudAiplatformV1FluencyResult; /** * Result for fulfillment metric. */ fulfillmentResult?: GoogleCloudAiplatformV1FulfillmentResult; /** * Result for groundedness metric. */ groundednessResult?: GoogleCloudAiplatformV1GroundednessResult; /** * Result for Metricx metric. */ metricxResult?: GoogleCloudAiplatformV1MetricxResult; /** * Result for pairwise metric. */ pairwiseMetricResult?: GoogleCloudAiplatformV1PairwiseMetricResult; /** * Result for pairwise question answering quality metric. */ pairwiseQuestionAnsweringQualityResult?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityResult; /** * Result for pairwise summarization quality metric. */ pairwiseSummarizationQualityResult?: GoogleCloudAiplatformV1PairwiseSummarizationQualityResult; /** * Generic metrics. Result for pointwise metric. */ pointwiseMetricResult?: GoogleCloudAiplatformV1PointwiseMetricResult; /** * Result for question answering correctness metric. */ questionAnsweringCorrectnessResult?: GoogleCloudAiplatformV1QuestionAnsweringCorrectnessResult; /** * Result for question answering helpfulness metric. */ questionAnsweringHelpfulnessResult?: GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessResult; /** * Question answering only metrics. Result for question answering quality * metric. */ questionAnsweringQualityResult?: GoogleCloudAiplatformV1QuestionAnsweringQualityResult; /** * Result for question answering relevance metric. */ questionAnsweringRelevanceResult?: GoogleCloudAiplatformV1QuestionAnsweringRelevanceResult; /** * Results for rouge metric. */ rougeResults?: GoogleCloudAiplatformV1RougeResults; /** * Result for safety metric. 
*/ safetyResult?: GoogleCloudAiplatformV1SafetyResult; /** * Result for summarization helpfulness metric. */ summarizationHelpfulnessResult?: GoogleCloudAiplatformV1SummarizationHelpfulnessResult; /** * Summarization only metrics. Result for summarization quality metric. */ summarizationQualityResult?: GoogleCloudAiplatformV1SummarizationQualityResult; /** * Result for summarization verbosity metric. */ summarizationVerbosityResult?: GoogleCloudAiplatformV1SummarizationVerbosityResult; /** * Tool call metrics. Results for tool call valid metric. */ toolCallValidResults?: GoogleCloudAiplatformV1ToolCallValidResults; /** * Results for tool name match metric. */ toolNameMatchResults?: GoogleCloudAiplatformV1ToolNameMatchResults; /** * Results for tool parameter key match metric. */ toolParameterKeyMatchResults?: GoogleCloudAiplatformV1ToolParameterKeyMatchResults; /** * Results for tool parameter key value match metric. */ toolParameterKvMatchResults?: GoogleCloudAiplatformV1ToolParameterKVMatchResults; } /** * An edge describing the relationship between an Artifact and an Execution in * a lineage graph. */ export interface GoogleCloudAiplatformV1Event { /** * Required. The relative resource name of the Artifact in the Event. */ artifact?: string; /** * Output only. Time the Event occurred. */ readonly eventTime?: Date; /** * Output only. The relative resource name of the Execution in the Event. */ readonly execution?: string; /** * The labels with user-defined metadata to annotate Events. Label keys and * values can be no longer than 64 characters (Unicode codepoints), can only * contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. No more than 64 user labels can be * associated with one Event (System labels are excluded). See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. */ labels?: { [key: string]: string }; /** * Required. The type of the Event. */ type?: | "TYPE_UNSPECIFIED" | "INPUT" | "OUTPUT"; } /** * Input for exact match metric. */ export interface GoogleCloudAiplatformV1ExactMatchInput { /** * Required. Repeated exact match instances. */ instances?: GoogleCloudAiplatformV1ExactMatchInstance[]; /** * Required. Spec for exact match metric. */ metricSpec?: GoogleCloudAiplatformV1ExactMatchSpec; } /** * Spec for exact match instance. */ export interface GoogleCloudAiplatformV1ExactMatchInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Exact match metric value for an instance. */ export interface GoogleCloudAiplatformV1ExactMatchMetricValue { /** * Output only. Exact match score. */ readonly score?: number; } /** * Results for exact match metric. */ export interface GoogleCloudAiplatformV1ExactMatchResults { /** * Output only. Exact match metric values. */ readonly exactMatchMetricValues?: GoogleCloudAiplatformV1ExactMatchMetricValue[]; } /** * Spec for exact match metric - returns 1 if prediction and reference exactly * matches, otherwise 0. */ export interface GoogleCloudAiplatformV1ExactMatchSpec { } /** * Example-based explainability that returns the nearest neighbors from the * provided dataset. */ export interface GoogleCloudAiplatformV1Examples { /** * The Cloud Storage input instances. 
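 *
 * (Aside on the exact match metric types defined just above: a hedged sketch
 * of an EvaluateInstancesRequest carrying a single instance, with purely
 * illustrative strings:
 *
 *     const evalReq: GoogleCloudAiplatformV1EvaluateInstancesRequest = {
 *       exactMatchInput: {
 *         metricSpec: {},
 *         instances: [{ prediction: "Paris", reference: "Paris" }],
 *       },
 *     };
 *
 * The corresponding result is returned in
 * GoogleCloudAiplatformV1EvaluateInstancesResponse.exactMatchResults, whose
 * exactMatchMetricValues carry a score of 1 for an exact match and 0
 * otherwise.)
 *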
*/ exampleGcsSource?: GoogleCloudAiplatformV1ExamplesExampleGcsSource; /** * The full configuration for the generated index, the semantics are the same * as metadata and should match * [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). */ nearestNeighborSearchConfig?: any; /** * The number of neighbors to return when querying for examples. */ neighborCount?: number; /** * Simplified preset configuration, which automatically sets configuration * values based on the desired query speed-precision trade-off and modality. */ presets?: GoogleCloudAiplatformV1Presets; } /** * The Cloud Storage input instances. */ export interface GoogleCloudAiplatformV1ExamplesExampleGcsSource { /** * The format in which instances are given, if not specified, assume it's * JSONL format. Currently only JSONL format is supported. */ dataFormat?: | "DATA_FORMAT_UNSPECIFIED" | "JSONL"; /** * The Cloud Storage location for the input instances. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * Overrides for example-based explanations. */ export interface GoogleCloudAiplatformV1ExamplesOverride { /** * The number of neighbors to return that have the same crowding tag. */ crowdingCount?: number; /** * The format of the data being provided with each call. */ dataFormat?: | "DATA_FORMAT_UNSPECIFIED" | "INSTANCES" | "EMBEDDINGS"; /** * The number of neighbors to return. */ neighborCount?: number; /** * Restrict the resulting nearest neighbors to respect these constraints. */ restrictions?: GoogleCloudAiplatformV1ExamplesRestrictionsNamespace[]; /** * If true, return the embeddings instead of neighbors. */ returnEmbeddings?: boolean; } /** * Restrictions namespace for example-based explanations overrides. */ export interface GoogleCloudAiplatformV1ExamplesRestrictionsNamespace { /** * The list of allowed tags. */ allow?: string[]; /** * The list of deny tags. */ deny?: string[]; /** * The namespace name. */ namespaceName?: string; } /** * Instance of a general execution. */ export interface GoogleCloudAiplatformV1Execution { /** * Output only. Timestamp when this Execution was created. */ readonly createTime?: Date; /** * Description of the Execution */ description?: string; /** * User provided display name of the Execution. May be up to 128 Unicode * characters. */ displayName?: string; /** * An eTag used to perform consistent read-modify-write updates. If not set, * a blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Executions. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Execution (System labels are excluded). */ labels?: { [key: string]: string }; /** * Properties of the Execution. Top level metadata keys' heading and trailing * spaces will be trimmed. The size of this field should not exceed 200KB. */ metadata?: { [key: string]: any }; /** * Output only. The resource name of the Execution. */ readonly name?: string; /** * The title of the schema describing the metadata. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. 
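 *
 * A minimal hedged sketch of an Execution (the display name, schema title,
 * and schema version are illustrative; the schema must have been registered
 * in the local metadata store via earlier Create Schema calls):
 *
 *     const execution: GoogleCloudAiplatformV1Execution = {
 *       displayName: "training-run-42",
 *       schemaTitle: "system.Run",
 *       schemaVersion: "0.0.1",
 *       state: "RUNNING",
 *     };
 *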
*/ schemaTitle?: string; /** * The version of the schema in `schema_title` to use. Schema title and * version is expected to be registered in earlier Create Schema calls. And * both are used together as unique identifiers to identify schemas within the * local metadata store. */ schemaVersion?: string; /** * The state of this Execution. This is a property of the Execution, and does * not imply or capture any ongoing process. This property is managed by * clients (such as Vertex AI Pipelines) and the system does not prescribe or * check the validity of state transitions. */ state?: | "STATE_UNSPECIFIED" | "NEW" | "RUNNING" | "COMPLETE" | "FAILED" | "CACHED" | "CANCELLED"; /** * Output only. Timestamp when this Execution was last updated. */ readonly updateTime?: Date; } /** * Request message for PredictionService.Explain. */ export interface GoogleCloudAiplatformV1ExplainRequest { /** * If specified, this ExplainRequest will be served by the chosen * DeployedModel, overriding Endpoint.traffic_split. */ deployedModelId?: string; /** * If specified, overrides the explanation_spec of the DeployedModel. Can be * used for explaining prediction results with different configurations, such * as: - Explaining top-5 predictions results as opposed to top-1; - * Increasing path count or step count of the attribution methods to reduce * approximate errors; - Using different baselines for explaining the * prediction results. */ explanationSpecOverride?: GoogleCloudAiplatformV1ExplanationSpecOverride; /** * Required. The instances that are the input to the explanation call. A * DeployedModel may have an upper limit on the number of instances it * supports per request, and when it is exceeded the explanation call errors * in case of AutoML Models, or, in case of customer created Models, the * behaviour is as documented by that Model. The schema of any single instance * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * instance_schema_uri. */ instances?: any[]; /** * The parameters that govern the prediction. The schema of the parameters * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * parameters_schema_uri. */ parameters?: any; } /** * Response message for PredictionService.Explain. */ export interface GoogleCloudAiplatformV1ExplainResponse { /** * ID of the Endpoint's DeployedModel that served this explanation. */ deployedModelId?: string; /** * The explanations of the Model's PredictResponse.predictions. It has the * same number of elements as instances to be explained. */ explanations?: GoogleCloudAiplatformV1Explanation[]; /** * The predictions that are the output of the predictions call. Same as * PredictResponse.predictions. */ predictions?: any[]; } /** * Explanation of a prediction (provided in PredictResponse.predictions) * produced by the Model on a given instance. */ export interface GoogleCloudAiplatformV1Explanation { /** * Output only. Feature attributions grouped by predicted outputs. For Models * that predict only one output, such as regression Models that predict only * one score, there is only one attribution that explains the predicted output. * For Models that predict multiple outputs, such as multiclass Models that * predict multiple classes, each element explains one specific item. * Attribution.output_index can be used to identify which output this * attribution is explaining. By default, we provide Shapley values for the * predicted class.
However, you can configure the explanation request to * generate Shapley values for any other classes too. For example, if a model * predicts a probability of `0.4` for approving a loan application, the * model's decision is to reject the application since `p(reject) = 0.6 > * p(approve) = 0.4`, and the default Shapley values would be computed for * rejection decision and not approval, even though the latter might be the * positive class. If users set ExplanationParameters.top_k, the attributions * are sorted by instance_output_value in descending order. If * ExplanationParameters.output_indices is specified, the attributions are * stored by Attribution.output_index in the same order as they appear in the * output_indices. */ readonly attributions?: GoogleCloudAiplatformV1Attribution[]; /** * Output only. List of the nearest neighbors for example-based explanations. * For models deployed with the examples explanations feature enabled, the * attributions field is empty and instead the neighbors field is populated. */ readonly neighbors?: GoogleCloudAiplatformV1Neighbor[]; } /** * Metadata describing the Model's input and output for explanation. */ export interface GoogleCloudAiplatformV1ExplanationMetadata { /** * Points to a YAML file stored on Google Cloud Storage describing the format * of the feature attributions. The schema is defined as an OpenAPI 3.0.2 * [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * AutoML tabular Models always have this field populated by Vertex AI. Note: * The URI given on output may be different, including the URI scheme, than * the one given on input. The output URI will point to a location where the * user only has a read access. */ featureAttributionsSchemaUri?: string; /** * Required. Map from feature names to feature input metadata. Keys are the * name of the features. Values are the specification of the feature. An empty * InputMetadata is valid. It describes a text feature which has the name * specified as the key in ExplanationMetadata.inputs. The baseline of the * empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow * images, the key can be any friendly name of the feature. Once specified, * featureAttributions are keyed by this key (if not grouped with another * feature). For custom images, the key must match with the key in instance. */ inputs?: { [key: string]: GoogleCloudAiplatformV1ExplanationMetadataInputMetadata }; /** * Name of the source to generate embeddings for example based explanations. */ latentSpaceSource?: string; /** * Required. Map from output names to output metadata. For Vertex AI-provided * Tensorflow images, keys can be any user defined string that consists of any * UTF-8 characters. For custom images, keys are the name of the output field * in the prediction to be explained. Currently only one key is allowed. */ outputs?: { [key: string]: GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata }; } /** * Metadata of the input of a feature. Fields other than * InputMetadata.input_baselines are applicable only for Models that are using * Vertex AI-provided images for Tensorflow. */ export interface GoogleCloudAiplatformV1ExplanationMetadataInputMetadata { /** * Specifies the shape of the values of the input if the input is a sparse * representation. Refer to Tensorflow documentation for more details: * https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ denseShapeTensorName?: string; /** * A list of baselines for the encoded tensor. 
The shape of each baseline * should match the shape of the encoded tensor. If a scalar is provided, * Vertex AI broadcasts to the same shape as the encoded tensor. */ encodedBaselines?: any[]; /** * Encoded tensor is a transformation of the input tensor. Must be provided * if choosing Integrated Gradients attribution or XRAI attribution and the * input tensor is not differentiable. An encoded tensor is generated if the * input tensor is encoded by a lookup table. */ encodedTensorName?: string; /** * Defines how the feature is encoded into the input tensor. Defaults to * IDENTITY. */ encoding?: | "ENCODING_UNSPECIFIED" | "IDENTITY" | "BAG_OF_FEATURES" | "BAG_OF_FEATURES_SPARSE" | "INDICATOR" | "COMBINED_EMBEDDING" | "CONCAT_EMBEDDING"; /** * The domain details of the input feature value. Like min/max, original mean * or standard deviation if normalized. */ featureValueDomain?: GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain; /** * Name of the group that the input belongs to. Features with the same group * name will be treated as one feature when computing attributions. Features * grouped together can have different shapes in value. If provided, there * will be one single attribution generated in * Attribution.feature_attributions, keyed by the group name. */ groupName?: string; /** * A list of feature names for each index in the input tensor. Required when * the input InputMetadata.encoding is BAG_OF_FEATURES, * BAG_OF_FEATURES_SPARSE, INDICATOR. */ indexFeatureMapping?: string[]; /** * Specifies the index of the values of the input tensor. Required when the * input tensor is a sparse representation. Refer to Tensorflow documentation * for more details: * https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ indicesTensorName?: string; /** * Baseline inputs for this feature. If no baseline is specified, Vertex AI * chooses the baseline for this feature. If multiple baselines are specified, * Vertex AI returns the average attributions across them in * Attribution.feature_attributions. For Vertex AI-provided Tensorflow images * (both 1.x and 2.x), the shape of each baseline must match the shape of the * input tensor. If a scalar is provided, we broadcast to the same shape as * the input tensor. For custom images, the element of the baselines must be * in the same format as the feature's input in the instance[]. The schema of * any single instance may be specified via Endpoint's DeployedModels' Model's * PredictSchemata's instance_schema_uri. */ inputBaselines?: any[]; /** * Name of the input tensor for this feature. Required and is only applicable * to Vertex AI-provided images for Tensorflow. */ inputTensorName?: string; /** * Modality of the feature. Valid values are: numeric, image. Defaults to * numeric. */ modality?: string; /** * Visualization configurations for image explanation. */ visualization?: GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization; } /** * Domain details of the input feature value. Provides numeric information * about the feature, such as its range (min, max). If the feature has been * pre-processed, for example with z-scoring, then it provides information about * how to recover the original feature. For example, if the input feature is an * image and it has been pre-processed to obtain 0-mean and stddev = 1 values, * then original_mean, and original_stddev refer to the mean and stddev of the * original feature (e.g. image tensor) from which input feature (with mean = 0 * and stddev = 1) was obtained. 
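 *
 * As a worked example with illustrative numbers: if a feature was z-scored
 * with originalMean = 12.0 and originalStddev = 3.0, a normalized input value
 * of 1.5 maps back to an original value in the usual way,
 * 1.5 * 3.0 + 12.0 = 16.5:
 *
 *     const domain = { originalMean: 12.0, originalStddev: 3.0 };
 *     const normalized = 1.5;
 *     const original = normalized * domain.originalStddev + domain.originalMean; // 16.5
 *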
*/ export interface GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain { /** * The maximum permissible value for this feature. */ maxValue?: number; /** * The minimum permissible value for this feature. */ minValue?: number; /** * If this input feature has been normalized to a mean value of 0, the * original_mean specifies the mean value of the domain prior to * normalization. */ originalMean?: number; /** * If this input feature has been normalized to a standard deviation of 1.0, * the original_stddev specifies the standard deviation of the domain prior to * normalization. */ originalStddev?: number; } /** * Visualization configurations for image explanation. */ export interface GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization { /** * Excludes attributions below the specified percentile, from the highlighted * areas. Defaults to 62. */ clipPercentLowerbound?: number; /** * Excludes attributions above the specified percentile from the highlighted * areas. Using the clip_percent_upperbound and clip_percent_lowerbound * together can be useful for filtering out noise and making it easier to see * areas of strong attribution. Defaults to 99.9. */ clipPercentUpperbound?: number; /** * The color scheme used for the highlighted areas. Defaults to PINK_GREEN * for Integrated Gradients attribution, which shows positive attributions in * green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which * highlights the most influential regions in yellow and the least influential * in blue. */ colorMap?: | "COLOR_MAP_UNSPECIFIED" | "PINK_GREEN" | "VIRIDIS" | "RED" | "GREEN" | "RED_GREEN" | "PINK_WHITE_GREEN"; /** * How the original image is displayed in the visualization. Adjusting the * overlay can help increase visual clarity if the original image makes it * difficult to view the visualization. Defaults to NONE. */ overlayType?: | "OVERLAY_TYPE_UNSPECIFIED" | "NONE" | "ORIGINAL" | "GRAYSCALE" | "MASK_BLACK"; /** * Whether to only highlight pixels with positive contributions, negative or * both. Defaults to POSITIVE. */ polarity?: | "POLARITY_UNSPECIFIED" | "POSITIVE" | "NEGATIVE" | "BOTH"; /** * Type of the image visualization. Only applicable to Integrated Gradients * attribution. OUTLINES shows regions of attribution, while PIXELS shows * per-pixel attribution. Defaults to OUTLINES. */ type?: | "TYPE_UNSPECIFIED" | "PIXELS" | "OUTLINES"; } /** * Metadata of the prediction output to be explained. */ export interface GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata { /** * Specify a field name in the prediction to look for the display name. Use * this if the prediction contains the display names for the outputs. The * display names in the prediction must have the same shape of the outputs, so * that it can be located by Attribution.output_index for a specific output. */ displayNameMappingKey?: string; /** * Static mapping between the index and display name. Use this if the outputs * are a deterministic n-dimensional array, e.g. a list of scores of all the * classes in a pre-defined order for a multi-classification Model. It's not * feasible if the outputs are non-deterministic, e.g. the Model produces * top-k classes or sort the outputs by their values. The shape of the value * must be an n-dimensional array of strings. The number of dimensions must * match that of the outputs to be explained. The * Attribution.output_display_name is populated by locating in the mapping * with Attribution.output_index. 
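 *
 * Illustrative sketch (the class names are hypothetical; not part of the
 * generated client): for a classifier whose output is a fixed-order score
 * array, the mapping is simply the parallel array of display names.
 *
 * ```ts
 * const outputMetadata: GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata = {
 *   indexDisplayNameMapping: ["rejected", "approved", "needs_review"],
 * };
 * ```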
*/ indexDisplayNameMapping?: any; /** * Name of the output tensor. Required and is only applicable to Vertex AI * provided images for Tensorflow. */ outputTensorName?: string; } /** * The ExplanationMetadata entries that can be overridden at online explanation * time. */ export interface GoogleCloudAiplatformV1ExplanationMetadataOverride { /** * Required. Overrides the input metadata of the features. The key is the * name of the feature to be overridden. The keys specified here must exist in * the input metadata to be overridden. If a feature is not specified here, * the corresponding feature's input metadata is not overridden. */ inputs?: { [key: string]: GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride }; } /** * The input metadata entries to be overridden. */ export interface GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride { /** * Baseline inputs for this feature. This overrides the `input_baseline` * field of the ExplanationMetadata.InputMetadata object of the corresponding * feature's input metadata. If it's not specified, the original baselines are * not overridden. */ inputBaselines?: any[]; } /** * Parameters to configure explaining for the Model's predictions. */ export interface GoogleCloudAiplatformV1ExplanationParameters { /** * Example-based explanations that return the nearest neighbors from the * provided dataset. */ examples?: GoogleCloudAiplatformV1Examples; /** * An attribution method that computes Aumann-Shapley values taking advantage * of the model's fully differentiable structure. Refer to this paper for more * details: https://arxiv.org/abs/1703.01365 */ integratedGradientsAttribution?: GoogleCloudAiplatformV1IntegratedGradientsAttribution; /** * If populated, only returns attributions that have output_index contained * in output_indices. It must be an ndarray of integers, with the same shape * as the output it's explaining. If not populated, returns attributions for * top_k indices of outputs. If neither top_k nor output_indices is populated, * returns the argmax index of the outputs. Only applicable to Models that * predict multiple outputs (e.g. multi-class Models that predict multiple * classes). */ outputIndices?: any[]; /** * An attribution method that approximates Shapley values for features that * contribute to the label being predicted. A sampling strategy is used to * approximate the value rather than considering all subsets of features. * Refer to this paper for more details: https://arxiv.org/abs/1306.4265. */ sampledShapleyAttribution?: GoogleCloudAiplatformV1SampledShapleyAttribution; /** * If populated, returns attributions for the top K indices of outputs * (defaults to 1). Only applies to Models that predict more than one output * (e.g. multi-class Models). When set to -1, returns explanations for all * outputs. */ topK?: number; /** * An attribution method that redistributes Integrated Gradients attribution * to segmented regions, taking advantage of the model's fully differentiable * structure. Refer to this paper for more details: * https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural * images, like a picture of a house or an animal. If the images are taken in * artificial environments, like a lab or manufacturing line, or from * diagnostic equipment, like x-rays or quality-control cameras, use * Integrated Gradients instead. */ xraiAttribution?: GoogleCloudAiplatformV1XraiAttribution; } /** * Specification of Model explanation.
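 *
 * Illustrative sketch (not part of the generated client; values are
 * hypothetical): a spec that requests attributions for the top 2 outputs. In
 * practice one of the attribution methods (for example
 * sampledShapleyAttribution) would also be configured in `parameters`.
 *
 * ```ts
 * const spec: GoogleCloudAiplatformV1ExplanationSpec = {
 *   parameters: { topK: 2 },
 * };
 * ```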
*/ export interface GoogleCloudAiplatformV1ExplanationSpec { /** * Optional. Metadata describing the Model's input and output for * explanation. */ metadata?: GoogleCloudAiplatformV1ExplanationMetadata; /** * Required. Parameters that configure explaining of the Model's predictions. */ parameters?: GoogleCloudAiplatformV1ExplanationParameters; } /** * The ExplanationSpec entries that can be overridden at online explanation * time. */ export interface GoogleCloudAiplatformV1ExplanationSpecOverride { /** * The example-based explanations parameter overrides. */ examplesOverride?: GoogleCloudAiplatformV1ExamplesOverride; /** * The metadata to be overridden. If not specified, no metadata is * overridden. */ metadata?: GoogleCloudAiplatformV1ExplanationMetadataOverride; /** * The parameters to be overridden. Note that the attribution method cannot * be changed. If not specified, no parameter is overridden. */ parameters?: GoogleCloudAiplatformV1ExplanationParameters; } /** * Describes what part of the Dataset is to be exported, the destination of the * export and how to export. */ export interface GoogleCloudAiplatformV1ExportDataConfig { /** * The Cloud Storage URI that points to a YAML file describing the annotation * schema. The schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * The schema files that can be used here are found in * gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the * chosen schema must be consistent with metadata of the Dataset specified by * ExportDataRequest.name. Only used for custom training data export use * cases. Only applicable to Datasets that have DataItems and Annotations. * Only Annotations that both match this schema and belong to DataItems not * ignored by the split method are used in respectively training, validation * or test role, depending on the role of the DataItem they are on. When used * in conjunction with annotations_filter, the Annotations used for training * are filtered by both annotations_filter and annotation_schema_uri. */ annotationSchemaUri?: string; /** * An expression for filtering what part of the Dataset is to be exported. * Only Annotations that match this filter will be exported. The filter syntax * is the same as in ListAnnotations. */ annotationsFilter?: string; /** * Indicates the usage of the exported files. */ exportUse?: | "EXPORT_USE_UNSPECIFIED" | "CUSTOM_CODE_TRAINING"; /** * Split based on the provided filters for each set. */ filterSplit?: GoogleCloudAiplatformV1ExportFilterSplit; /** * Split based on fractions defining the size of each set. */ fractionSplit?: GoogleCloudAiplatformV1ExportFractionSplit; /** * The Google Cloud Storage location where the output is to be written to. In * the given directory a new directory will be created with name: * `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 * format. All export output will be written into that directory. Inside that * directory, annotations with the same schema will be grouped into sub * directories which are named with the corresponding annotations' schema * title. Inside these sub directories, a schema.yaml will be created to * describe the output format. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; /** * The ID of a SavedQuery (annotation set) under the Dataset specified by * ExportDataRequest.name used for filtering Annotations for training. Only * used for custom training data export use cases. 
Only applicable to Datasets * that have SavedQueries. Only Annotations that are associated with this * SavedQuery are used in respectively training. When used in conjunction with * annotations_filter, the Annotations used for training are filtered by both * saved_query_id and annotations_filter. Only one of saved_query_id and * annotation_schema_uri should be specified as both of them represent the * same thing: problem type. */ savedQueryId?: string; } /** * Runtime operation information for DatasetService.ExportData. */ export interface GoogleCloudAiplatformV1ExportDataOperationMetadata { /** * A Google Cloud Storage directory which path ends with '/'. The exported * data is stored in the directory. */ gcsOutputDirectory?: string; /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for DatasetService.ExportData. */ export interface GoogleCloudAiplatformV1ExportDataRequest { /** * Required. The desired output location. */ exportConfig?: GoogleCloudAiplatformV1ExportDataConfig; } /** * Response message for DatasetService.ExportData. */ export interface GoogleCloudAiplatformV1ExportDataResponse { /** * Only present for custom code training export use case. Records data stats, * i.e., train/validation/test item/annotation counts calculated during the * export operation. */ dataStats?: GoogleCloudAiplatformV1ModelDataStats; /** * All of the files that are exported in this export operation. For custom * code training export, only three (training, validation and test) Cloud * Storage paths in wildcard format are populated (for example, * gs://.../training-*). */ exportedFiles?: string[]; } function serializeGoogleCloudAiplatformV1ExportDataResponse(data: any): GoogleCloudAiplatformV1ExportDataResponse { return { ...data, dataStats: data["dataStats"] !== undefined ? serializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportDataResponse(data: any): GoogleCloudAiplatformV1ExportDataResponse { return { ...data, dataStats: data["dataStats"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, }; } /** * Details of operations that exports Features values. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesOperationMetadata { /** * Operation metadata for Featurestore export Feature values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.ExportFeatureValues. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesRequest { /** * Required. Specifies destination location and format. */ destination?: GoogleCloudAiplatformV1FeatureValueDestination; /** * Required. Selects Features to export values of. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; /** * Exports all historical values of all entities of the EntityType within a * time range */ fullExport?: GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport; /** * Per-Feature export settings. */ settings?: GoogleCloudAiplatformV1DestinationFeatureSetting[]; /** * Exports the latest Feature values of all entities of the EntityType within * a time range. */ snapshotExport?: GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport; } function serializeGoogleCloudAiplatformV1ExportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequest { return { ...data, fullExport: data["fullExport"] !== undefined ? 
serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data["fullExport"]) : undefined, snapshotExport: data["snapshotExport"] !== undefined ? serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data["snapshotExport"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequest { return { ...data, fullExport: data["fullExport"] !== undefined ? deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data["fullExport"]) : undefined, snapshotExport: data["snapshotExport"] !== undefined ? deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data["snapshotExport"]) : undefined, }; } /** * Describes exporting all historical Feature values of all entities of the * EntityType between [start_time, end_time]. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { /** * Exports Feature values as of this timestamp. If not set, retrieve values * as of now. Timestamp, if present, must not have higher than millisecond * precision. */ endTime?: Date; /** * Excludes Feature values with feature generation timestamp before this * timestamp. If not set, retrieve oldest values kept in Feature Store. * Timestamp, if present, must not have higher than millisecond precision. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Describes exporting the latest Feature values of all entities of the * EntityType between [start_time, snapshot_time]. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { /** * Exports Feature values as of this timestamp. If not set, retrieve values * as of now. Timestamp, if present, must not have higher than millisecond * precision. */ snapshotTime?: Date; /** * Excludes Feature values with feature generation timestamp before this * timestamp. If not set, retrieve oldest values kept in Feature Store. * Timestamp, if present, must not have higher than millisecond precision. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { return { ...data, snapshotTime: data["snapshotTime"] !== undefined ? data["snapshotTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { return { ...data, snapshotTime: data["snapshotTime"] !== undefined ? new Date(data["snapshotTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Response message for FeaturestoreService.ExportFeatureValues. 
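 *
 * The response message itself carries no fields. For reference, a sketch of
 * the corresponding request (the timestamp is hypothetical); note that
 * serializeGoogleCloudAiplatformV1ExportFeatureValuesRequest converts the
 * Date fields to ISO strings before the request is sent:
 *
 * ```ts
 * const req: GoogleCloudAiplatformV1ExportFeatureValuesRequest = {
 *   snapshotExport: { snapshotTime: new Date("2024-01-01T00:00:00.000Z") },
 * };
 * ```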
*/ export interface GoogleCloudAiplatformV1ExportFeatureValuesResponse { } /** * Assigns input data to training, validation, and test sets based on the given * filters, data pieces not matched by any filter are ignored. Currently only * supported for Datasets containing DataItems. If any of the filters in this * message are to match nothing, then they can be set as '-' (the minus sign). * Supported only for unstructured Datasets. */ export interface GoogleCloudAiplatformV1ExportFilterSplit { /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to test the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ testFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to train the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ trainingFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to validate the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ validationFilter?: string; } /** * Assigns the input data to training, validation, and test sets as per the * given fractions. Any of `training_fraction`, `validation_fraction` and * `test_fraction` may optionally be provided, they must sum to up to 1. If the * provided ones sum to less than 1, the remainder is assigned to sets as * decided by Vertex AI. If none of the fractions are set, by default roughly * 80% of data is used for training, 10% for validation, and 10% for test. */ export interface GoogleCloudAiplatformV1ExportFractionSplit { /** * The fraction of the input data that is to be used to evaluate the Model. */ testFraction?: number; /** * The fraction of the input data that is to be used to train the Model. */ trainingFraction?: number; /** * The fraction of the input data that is to be used to validate the Model. */ validationFraction?: number; } /** * Details of ModelService.ExportModel operation. */ export interface GoogleCloudAiplatformV1ExportModelOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Output only. Information further describing the output of this Model * export. */ readonly outputInfo?: GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo; } /** * Further describes the output of the ExportModel. Supplements * ExportModelRequest.OutputConfig. */ export interface GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo { /** * Output only. If the Model artifact is being exported to Google Cloud * Storage this is the full path of the directory created, into which the * Model files are being written to. */ readonly artifactOutputUri?: string; /** * Output only. 
If the Model image is being exported to Google Container * Registry or Artifact Registry this is the full path of the image created. */ readonly imageOutputUri?: string; } /** * Request message for ModelService.ExportModel. */ export interface GoogleCloudAiplatformV1ExportModelRequest { /** * Required. The desired output location and configuration. */ outputConfig?: GoogleCloudAiplatformV1ExportModelRequestOutputConfig; } /** * Output configuration for the Model export. */ export interface GoogleCloudAiplatformV1ExportModelRequestOutputConfig { /** * The Cloud Storage location where the Model artifact is to be written to. * Under the directory given as the destination a new one with name * "`model-export--`", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 * format, will be created. Inside, the Model and any of its supporting files * will be written. This field should only be set when the `exportableContent` * field of the [Model.supported_export_formats] object contains `ARTIFACT`. */ artifactDestination?: GoogleCloudAiplatformV1GcsDestination; /** * The ID of the format in which the Model must be exported. Each Model lists * the export formats it supports. If no value is provided here, then the * first from the list of the Model's supported formats is used by default. */ exportFormatId?: string; /** * The Google Container Registry or Artifact Registry uri where the Model * container image will be copied to. This field should only be set when the * `exportableContent` field of the [Model.supported_export_formats] object * contains `IMAGE`. */ imageDestination?: GoogleCloudAiplatformV1ContainerRegistryDestination; } /** * Response message of ModelService.ExportModel operation. */ export interface GoogleCloudAiplatformV1ExportModelResponse { } /** * Request message for TensorboardService.ExportTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataRequest { /** * Exports the TensorboardTimeSeries' data that match the filter expression. */ filter?: string; /** * Field to use to sort the TensorboardTimeSeries' data. By default, * TensorboardTimeSeries' data is returned in a pseudo random order. */ orderBy?: string; /** * The maximum number of data points to return per page. The default * page_size is 1000. Values must be between 1 and 10000. Values above 10000 * are coerced to 10000. */ pageSize?: number; /** * A page token, received from a previous ExportTensorboardTimeSeriesData * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to ExportTensorboardTimeSeriesData must match the * call that provided the page token. */ pageToken?: string; } /** * Response message for TensorboardService.ExportTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { /** * A token, which can be sent as page_token to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * The returned time series data points. */ timeSeriesDataPoints?: GoogleCloudAiplatformV1TimeSeriesDataPoint[]; } function serializeGoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesDataPoints: data["timeSeriesDataPoints"] !== undefined ? 
data["timeSeriesDataPoints"].map((item: any) => (serializeGoogleCloudAiplatformV1TimeSeriesDataPoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesDataPoints: data["timeSeriesDataPoints"] !== undefined ? data["timeSeriesDataPoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1TimeSeriesDataPoint(item))) : undefined, }; } /** * The fact used in grounding. */ export interface GoogleCloudAiplatformV1Fact { /** * Query that is used to retrieve this fact. */ query?: string; /** * If present, according to the underlying Vector DB and the selected metric * type, the score can be either the distance or the similarity between the * query and the fact and its range depends on the metric type. For example, * if the metric type is COSINE_DISTANCE, it represents the distance between * the query and the fact. The larger the distance, the less relevant the fact * is to the query. The range is [0, 2], while 0 means the most relevant and 2 * means the least relevant. */ score?: number; /** * If present, the summary/snippet of the fact. */ summary?: string; /** * If present, it refers to the title of this fact. */ title?: string; /** * If present, this uri links to the source of the fact. */ uri?: string; /** * If present, the distance between the query vector and this fact vector. */ vectorDistance?: number; } /** * Configuration for faster model deployment. */ export interface GoogleCloudAiplatformV1FasterDeploymentConfig { /** * If true, enable fast tryout feature for this deployed model. */ fastTryoutEnabled?: boolean; } /** * Feature Metadata information. For example, color is a feature that describes * an apple. */ export interface GoogleCloudAiplatformV1Feature { /** * Output only. Only applicable for Vertex AI Feature Store (Legacy). * Timestamp when this EntityType was created. */ readonly createTime?: Date; /** * Description of the Feature. */ description?: string; /** * Optional. Only applicable for Vertex AI Feature Store (Legacy). If not * set, use the monitoring_config defined for the EntityType this Feature * belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, * DOUBLE or INT64 can enable monitoring. If set to true, all types of data * monitoring are disabled despite the config on EntityType. */ disableMonitoring?: boolean; /** * Used to perform a consistent read-modify-write updates. If not set, a * blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your Features. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information on and examples of labels. No * more than 64 user labels can be associated with one Feature (System labels * are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Output only. Only applicable for Vertex AI Feature Store (Legacy). The * list of historical stats and anomalies with specified objectives. */ readonly monitoringStatsAnomalies?: GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly[]; /** * Immutable. Name of the Feature. 
Format: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` * `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` * The last part, feature, is assigned by the client. The feature can be up to * 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, * underscore (_), and ASCII digits 0-9, starting with a letter. The value will * be unique given an entity type. */ name?: string; /** * Entity responsible for maintaining this feature. Can be a comma-separated * list of email addresses or URIs. */ pointOfContact?: string; /** * Output only. Only applicable for Vertex AI Feature Store (Legacy). * Timestamp when this EntityType was most recently updated. */ readonly updateTime?: Date; /** * Immutable. Only applicable for Vertex AI Feature Store (Legacy). Type of * Feature value. */ valueType?: | "VALUE_TYPE_UNSPECIFIED" | "BOOL" | "BOOL_ARRAY" | "DOUBLE" | "DOUBLE_ARRAY" | "INT64" | "INT64_ARRAY" | "STRING" | "STRING_ARRAY" | "BYTES" | "STRUCT"; /** * Only applicable for Vertex AI Feature Store. The name of the BigQuery * Table/View column hosting data for this version. If no value is provided, * feature_id will be used. */ versionColumnName?: string; } /** * Vertex AI Feature Group. */ export interface GoogleCloudAiplatformV1FeatureGroup { /** * Indicates that features for this group come from a BigQuery Table/View. By * default treats the source as a sparse time series source. The BigQuery * source table or view must have at least one entity ID column and a column * named `feature_timestamp`. */ bigQuery?: GoogleCloudAiplatformV1FeatureGroupBigQuery; /** * Output only. Timestamp when this FeatureGroup was created. */ readonly createTime?: Date; /** * Optional. Description of the FeatureGroup. */ description?: string; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * FeatureGroup. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one FeatureGroup (System * labels are excluded). System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Identifier. Name of the FeatureGroup. Format: * `projects/{project}/locations/{location}/featureGroups/{featureGroup}` */ name?: string; /** * Output only. Timestamp when this FeatureGroup was last updated. */ readonly updateTime?: Date; } /** * Input source type for BigQuery Tables and Views. */ export interface GoogleCloudAiplatformV1FeatureGroupBigQuery { /** * Required. Immutable. The BigQuery source URI that points to either a * BigQuery Table or View. */ bigQuerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * Optional. If set, all feature values will be fetched from a single row per * unique entityId, including nulls. If not set, will collapse all rows for * each unique entityId into a single row with any non-null values if present; * if no non-null values are present, null will be synced.
ex: If source has schema * `(entity_id, feature_timestamp, f0, f1)` and the following rows: `(e1, * 2020-01-01T10:00:00.123Z, 10, 15)` `(e1, 2020-02-01T10:00:00.123Z, 20, * null)` If dense is set, `(e1, 20, null)` is synced to online stores. If * dense is not set, `(e1, 20, 15)` is synced to online stores. */ dense?: boolean; /** * Optional. Columns to construct entity_id / row keys. If not provided * defaults to `entity_id`. */ entityIdColumns?: string[]; /** * Optional. Set if the data source is not a time-series. */ staticDataSource?: boolean; /** * Optional. If the source is a time-series source, this can be set to * control how downstream sources (ex: FeatureView ) will treat time-series * sources. If not set, will treat the source as a time-series source with * `feature_timestamp` as timestamp column and no scan boundary. */ timeSeries?: GoogleCloudAiplatformV1FeatureGroupBigQueryTimeSeries; } export interface GoogleCloudAiplatformV1FeatureGroupBigQueryTimeSeries { /** * Optional. Column hosting timestamp values for a time-series source. Will * be used to determine the latest `feature_values` for each entity. Optional. * If not provided, column named `feature_timestamp` of type `TIMESTAMP` will * be used. */ timestampColumn?: string; } /** * A list of historical SnapshotAnalysis or ImportFeaturesAnalysis stats * requested by user, sorted by FeatureStatsAnomaly.start_time descending. */ export interface GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly { /** * Output only. The stats and anomalies generated at specific timestamp. */ readonly featureStatsAnomaly?: GoogleCloudAiplatformV1FeatureStatsAnomaly; /** * Output only. The objective for each stats. */ readonly objective?: | "OBJECTIVE_UNSPECIFIED" | "IMPORT_FEATURE_ANALYSIS" | "SNAPSHOT_ANALYSIS"; } /** * Noise sigma by features. Noise sigma represents the standard deviation of * the gaussian kernel that will be used to add noise to interpolated inputs * prior to computing gradients. */ export interface GoogleCloudAiplatformV1FeatureNoiseSigma { /** * Noise sigma per feature. No noise is added to features that are not set. */ noiseSigma?: GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature[]; } /** * Noise sigma for a single feature. */ export interface GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature { /** * The name of the input feature for which noise sigma is provided. The * features are defined in explanation metadata inputs. */ name?: string; /** * This represents the standard deviation of the Gaussian kernel that will be * used to add noise to the feature prior to computing gradients. Similar to * noise_sigma but represents the noise added to the current feature. Defaults * to 0.1. */ sigma?: number; } /** * Vertex AI Feature Online Store provides a centralized repository for serving * ML features and embedding indexes at low latency. The Feature Online Store is * a top-level container. */ export interface GoogleCloudAiplatformV1FeatureOnlineStore { /** * Contains settings for the Cloud Bigtable instance that will be created to * serve featureValues for all FeatureViews under this FeatureOnlineStore. */ bigtable?: GoogleCloudAiplatformV1FeatureOnlineStoreBigtable; /** * Output only. Timestamp when this FeatureOnlineStore was created. */ readonly createTime?: Date; /** * Optional. The dedicated serving endpoint for this FeatureOnlineStore, * which is different from common Vertex service endpoint. 
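 *
 * Illustrative sketch (not part of the generated client; the constant name is
 * hypothetical): an Optimized FeatureOnlineStore that relies on the public
 * dedicated endpoint provisioned by default, so only the `optimized` marker
 * is set.
 *
 * ```ts
 * const store: GoogleCloudAiplatformV1FeatureOnlineStore = {
 *   optimized: {},
 * };
 * ```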
*/ dedicatedServingEndpoint?: GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint; /** * Optional. Customer-managed encryption key spec for data storage. If set, * the online store will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * FeatureOnlineStore. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information on and examples of * labels. No more than 64 user labels can be associated with one * FeatureOnlineStore (System labels are excluded). System reserved label keys * are prefixed with "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Identifier. Name of the FeatureOnlineStore. Format: * `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` */ name?: string; /** * Contains settings for the Optimized store that will be created to serve * featureValues for all FeatureViews under this FeatureOnlineStore. When * choosing the Optimized storage type, you need to set * PrivateServiceConnectConfig.enable_private_service_connect to use a private * endpoint. Otherwise the public endpoint is used by default. */ optimized?: GoogleCloudAiplatformV1FeatureOnlineStoreOptimized; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. State of the featureOnlineStore. */ readonly state?: | "STATE_UNSPECIFIED" | "STABLE" | "UPDATING"; /** * Output only. Timestamp when this FeatureOnlineStore was last updated. */ readonly updateTime?: Date; } export interface GoogleCloudAiplatformV1FeatureOnlineStoreBigtable { /** * Required. Autoscaling config applied to the Bigtable Instance. */ autoScaling?: GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling; } export interface GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling { /** * Optional. A percentage of the cluster's CPU capacity. Can be from 10% to * 80%. When a cluster's CPU utilization exceeds the target that you have set, * Bigtable immediately adds nodes to the cluster. When CPU utilization is * substantially lower than the target, Bigtable removes nodes. If not set, * will default to 50%. */ cpuUtilizationTarget?: number; /** * Required. The maximum number of nodes to scale up to. Must be greater than * or equal to min_node_count, and less than or equal to 10 times * 'min_node_count'. */ maxNodeCount?: number; /** * Required. The minimum number of nodes to scale down to. Must be greater * than or equal to 1. */ minNodeCount?: number; } /** * The dedicated serving endpoint for this FeatureOnlineStore. Only needs to be * set when you choose the Optimized storage type. A public endpoint is * provisioned by default. */ export interface GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint { /** * Optional. Private service connect config. The private service connection * is available only for the Optimized storage type, not for embedding * management at this time. If * PrivateServiceConnectConfig.enable_private_service_connect is set to true, * customers will use the private service connection to send requests.
* Otherwise, the connection will use the public endpoint. */ privateServiceConnectConfig?: GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** * Output only. This field will be populated with the domain name to use for * this FeatureOnlineStore. */ readonly publicEndpointDomainName?: string; /** * Output only. The name of the service attachment resource. Populated if * private service connect is enabled and after FeatureViewSync is created. */ readonly serviceAttachment?: string; } /** * Optimized storage type. */ export interface GoogleCloudAiplatformV1FeatureOnlineStoreOptimized { } /** * Selector for Features of an EntityType. */ export interface GoogleCloudAiplatformV1FeatureSelector { /** * Required. Matches Features based on ID. */ idMatcher?: GoogleCloudAiplatformV1IdMatcher; } /** * Stats and Anomaly generated at a specific timestamp for a specific Feature. * The start_time and end_time are used to define the time range of the dataset * that the current stats belong to, e.g. prediction traffic is bucketed into * prediction datasets by time window. If the Dataset is not defined by time * window, start_time = end_time. Timestamp of the stats and anomalies always * refers to end_time. Raw stats and anomalies are stored in stats_uri or * anomaly_uri in the tensorflow defined protos. Field data_stats contains * almost identical information to the raw stats in the Vertex AI defined * proto, for the UI to display. */ export interface GoogleCloudAiplatformV1FeatureStatsAnomaly { /** * This is the threshold used when detecting anomalies. The threshold can be * changed by the user, so this one might be different from * ThresholdConfig.value. */ anomalyDetectionThreshold?: number; /** * Path of the anomaly file for current feature values in Cloud Storage * bucket. Format: gs:////anomalies. Example: * gs://monitoring_bucket/feature_name/anomalies. Anomalies are stored in * binary format with the Protobuf message [tensorflow.metadata.v0.AnomalyInfo] * (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). */ anomalyUri?: string; /** * Deviation from the current stats to baseline stats. 1. For categorical * feature, the distribution distance is calculated by the L-infinity norm. 2. * For numerical feature, the distribution distance is calculated by * Jensen–Shannon divergence. */ distributionDeviation?: number; /** * The end timestamp of the window where stats were generated. For objectives * where a time window doesn't make sense (e.g. Featurestore Snapshot * Monitoring), end_time indicates the timestamp of the data used to generate * stats (e.g. the timestamp we take snapshots for feature values). */ endTime?: Date; /** * Feature importance score, only populated when cross-feature monitoring is * enabled. For now only used to represent feature attribution score within * range [0, 1] for * ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW and * ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT. */ score?: number; /** * The start timestamp of the window where stats were generated. For * objectives where a time window doesn't make sense (e.g. Featurestore * Snapshot Monitoring), start_time is only used to indicate the monitoring * intervals, so it always equals (end_time - monitoring_interval). */ startTime?: Date; /** * Path of the stats file for current feature values in Cloud Storage bucket. * Format: gs:////stats. Example: gs://monitoring_bucket/feature_name/stats.
* Stats are stored as binary format with Protobuf message * [tensorflow.metadata.v0.FeatureNameStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto). */ statsUri?: string; } function serializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data: any): GoogleCloudAiplatformV1FeatureStatsAnomaly { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data: any): GoogleCloudAiplatformV1FeatureStatsAnomaly { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Vertex AI Feature Store provides a centralized repository for organizing, * storing, and serving ML features. The Featurestore is a top-level container * for your features and their values. */ export interface GoogleCloudAiplatformV1Featurestore { /** * Output only. Timestamp when this Featurestore was created. */ readonly createTime?: Date; /** * Optional. Customer-managed encryption key spec for data storage. If set, * both of the online and offline data storage will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * Featurestore. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one Featurestore(System * labels are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Output only. Name of the Featurestore. Format: * `projects/{project}/locations/{location}/featurestores/{featurestore}` */ readonly name?: string; /** * Optional. Config for online storage resources. The field should not * co-exist with the field of `OnlineStoreReplicationConfig`. If both of it * and OnlineStoreReplicationConfig are unset, the feature store will not have * an online store and cannot be used for online serving. */ onlineServingConfig?: GoogleCloudAiplatformV1FeaturestoreOnlineServingConfig; /** * Optional. TTL in days for feature values that will be stored in online * serving storage. The Feature Store online storage periodically removes * obsolete feature values older than `online_storage_ttl_days` since the * feature generation time. Note that `online_storage_ttl_days` should be less * than or equal to `offline_storage_ttl_days` for each EntityType under a * featurestore. If not set, default to 4000 days */ onlineStorageTtlDays?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. State of the featurestore. */ readonly state?: | "STATE_UNSPECIFIED" | "STABLE" | "UPDATING"; /** * Output only. Timestamp when this Featurestore was last updated. 
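 *
 * For context, a minimal Featurestore specification as it might be sent on
 * create (an illustrative sketch, not part of the generated client; the
 * values are hypothetical and output-only fields such as this one are
 * omitted):
 *
 * ```ts
 * const featurestore: GoogleCloudAiplatformV1Featurestore = {
 *   onlineServingConfig: { fixedNodeCount: 1 },
 *   onlineStorageTtlDays: 30,
 * };
 * ```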
*/ readonly updateTime?: Date; } /** * Configuration of how features in Featurestore are monitored. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfig { /** * Threshold for categorical features of anomaly detection. This is shared by * all types of Featurestore Monitoring for categorical features (i.e. * Features with type (Feature.ValueType) BOOL or STRING). */ categoricalThresholdConfig?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig; /** * The config for ImportFeatures Analysis Based Feature Monitoring. */ importFeaturesAnalysis?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigImportFeaturesAnalysis; /** * Threshold for numerical features of anomaly detection. This is shared by * all objectives of Featurestore Monitoring for numerical features (i.e. * Features with type (Feature.ValueType) DOUBLE or INT64). */ numericalThresholdConfig?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig; /** * The config for Snapshot Analysis Based Feature Monitoring. */ snapshotAnalysis?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigSnapshotAnalysis; } /** * Configuration of the Featurestore's ImportFeature Analysis Based Monitoring. * This type of analysis generates statistics for values of each Feature * imported by every ImportFeatureValues operation. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfigImportFeaturesAnalysis { /** * The baseline used to do anomaly detection for the statistics generated by * import features analysis. */ anomalyDetectionBaseline?: | "BASELINE_UNSPECIFIED" | "LATEST_STATS" | "MOST_RECENT_SNAPSHOT_STATS" | "PREVIOUS_IMPORT_FEATURES_STATS"; /** * Whether to enable / disable / inherit the default behavior for import * features analysis. */ state?: | "STATE_UNSPECIFIED" | "DEFAULT" | "ENABLED" | "DISABLED"; } /** * Configuration of the Featurestore's Snapshot Analysis Based Monitoring. This * type of analysis generates statistics for each Feature based on a snapshot of * the latest feature value of each entity every monitoring_interval. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfigSnapshotAnalysis { /** * The monitoring schedule for snapshot analysis. For EntityType-level * config: unset / disabled = true indicates disabled by default for Features * under it; otherwise by default enable snapshot analysis monitoring with * monitoring_interval for Features under it. Feature-level config: disabled = * true indicates disabled regardless of the EntityType-level config; unset * monitoring_interval indicates going with the EntityType-level config; * otherwise run snapshot analysis monitoring with monitoring_interval * regardless of the EntityType-level config. Explicitly disables the snapshot * analysis based monitoring. */ disabled?: boolean; /** * Configuration of the snapshot analysis based monitoring pipeline running * interval. The value indicates the number of days. */ monitoringIntervalDays?: number; /** * Customized export features time window for snapshot analysis. Unit is one * day. Default value is 3 weeks. Minimum value is 1 day. Maximum value is * 4000 days. */ stalenessDays?: number; } /** * The config for the Featurestore Monitoring threshold. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig { /** * Specify a threshold value that can trigger the alert. 1. For categorical * feature, the distribution distance is calculated by the L-infinity norm. 2.
* For numerical feature, the distribution distance is calculated by * Jensen–Shannon divergence. Each feature must have a non-zero threshold if it * needs to be monitored. Otherwise no alert will be triggered for that * feature. */ value?: number; } /** * OnlineServingConfig specifies the details for provisioning online serving * resources. */ export interface GoogleCloudAiplatformV1FeaturestoreOnlineServingConfig { /** * The number of nodes for the online store. The number of nodes doesn't * scale automatically, but you can manually update the number of nodes. If * set to 0, the featurestore will not have an online store and cannot be used * for online serving. */ fixedNodeCount?: number; /** * Online serving scaling configuration. Only one of `fixed_node_count` and * `scaling` can be set. Setting one will reset the other. */ scaling?: GoogleCloudAiplatformV1FeaturestoreOnlineServingConfigScaling; } /** * Online serving scaling configuration. If min_node_count and max_node_count * are set to the same value, the cluster will be configured with a fixed * number of nodes (no auto-scaling). */ export interface GoogleCloudAiplatformV1FeaturestoreOnlineServingConfigScaling { /** * Optional. The CPU utilization that the Autoscaler should be trying to * achieve. This number is on a scale from 0 (no utilization) to 100 (total * utilization), and is limited between 10 and 80. When a cluster's CPU * utilization exceeds the target that you have set, Bigtable immediately adds * nodes to the cluster. When CPU utilization is substantially lower than the * target, Bigtable removes nodes. If not set or set to 0, defaults to 50. */ cpuUtilizationTarget?: number; /** * The maximum number of nodes to scale up to. Must be greater than * min_node_count, and less than or equal to 10 times 'min_node_count'. */ maxNodeCount?: number; /** * Required. The minimum number of nodes to scale down to. Must be greater * than or equal to 1. */ minNodeCount?: number; } /** * Value for a feature. */ export interface GoogleCloudAiplatformV1FeatureValue { /** * A list of bool type feature values. */ boolArrayValue?: GoogleCloudAiplatformV1BoolArray; /** * Bool type feature value. */ boolValue?: boolean; /** * Bytes feature value. */ bytesValue?: Uint8Array; /** * A list of double type feature values. */ doubleArrayValue?: GoogleCloudAiplatformV1DoubleArray; /** * Double type feature value. */ doubleValue?: number; /** * A list of int64 type feature values. */ int64ArrayValue?: GoogleCloudAiplatformV1Int64Array; /** * Int64 feature value. */ int64Value?: bigint; /** * Metadata of feature value. */ metadata?: GoogleCloudAiplatformV1FeatureValueMetadata; /** * A list of string type feature values. */ stringArrayValue?: GoogleCloudAiplatformV1StringArray; /** * String feature value. */ stringValue?: string; /** * A struct type feature value. */ structValue?: GoogleCloudAiplatformV1StructValue; } function serializeGoogleCloudAiplatformV1FeatureValue(data: any): GoogleCloudAiplatformV1FeatureValue { return { ...data, bytesValue: data["bytesValue"] !== undefined ? encodeBase64(data["bytesValue"]) : undefined, int64ArrayValue: data["int64ArrayValue"] !== undefined ? serializeGoogleCloudAiplatformV1Int64Array(data["int64ArrayValue"]) : undefined, int64Value: data["int64Value"] !== undefined ? String(data["int64Value"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureValueMetadata(data["metadata"]) : undefined, structValue: data["structValue"] !== undefined ?
serializeGoogleCloudAiplatformV1StructValue(data["structValue"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureValue(data: any): GoogleCloudAiplatformV1FeatureValue { return { ...data, bytesValue: data["bytesValue"] !== undefined ? decodeBase64(data["bytesValue"] as string) : undefined, int64ArrayValue: data["int64ArrayValue"] !== undefined ? deserializeGoogleCloudAiplatformV1Int64Array(data["int64ArrayValue"]) : undefined, int64Value: data["int64Value"] !== undefined ? BigInt(data["int64Value"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureValueMetadata(data["metadata"]) : undefined, structValue: data["structValue"] !== undefined ? deserializeGoogleCloudAiplatformV1StructValue(data["structValue"]) : undefined, }; } /** * A destination location for Feature values and format. */ export interface GoogleCloudAiplatformV1FeatureValueDestination { /** * Output in BigQuery format. BigQueryDestination.output_uri in * FeatureValueDestination.bigquery_destination must refer to a table. */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * Output in CSV format. Array Feature value types are not allowed in CSV * format. */ csvDestination?: GoogleCloudAiplatformV1CsvDestination; /** * Output in TFRecord format. Below is the mapping from Feature value type in * Featurestore to Feature value type in TFRecord: DOUBLE, DOUBLE_ARRAY -> * FLOAT_LIST; INT64, INT64_ARRAY -> INT64_LIST; STRING, STRING_ARRAY, BYTES -> * BYTES_LIST; BOOL, BOOL_ARRAY (true, false) -> BYTES_LIST (true -> * byte_string("true"), false -> byte_string("false")). */ tfrecordDestination?: GoogleCloudAiplatformV1TFRecordDestination; } /** * Container for a list of values. */ export interface GoogleCloudAiplatformV1FeatureValueList { /** * A list of feature values. All of them should be the same data type. */ values?: GoogleCloudAiplatformV1FeatureValue[]; } function serializeGoogleCloudAiplatformV1FeatureValueList(data: any): GoogleCloudAiplatformV1FeatureValueList { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (serializeGoogleCloudAiplatformV1FeatureValue(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureValueList(data: any): GoogleCloudAiplatformV1FeatureValueList { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (deserializeGoogleCloudAiplatformV1FeatureValue(item))) : undefined, }; } /** * Metadata of feature value. */ export interface GoogleCloudAiplatformV1FeatureValueMetadata { /** * Feature generation timestamp. Typically, it is provided by the user at * feature ingestion time. If not, the feature store will use the system * timestamp when the data is ingested into the feature store. For streaming * ingestion, the time, aligned by days, must be no older than five years * (1825 days) and no later than one year (366 days) in the future. */ generateTime?: Date; } function serializeGoogleCloudAiplatformV1FeatureValueMetadata(data: any): GoogleCloudAiplatformV1FeatureValueMetadata { return { ...data, generateTime: data["generateTime"] !== undefined ? data["generateTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureValueMetadata(data: any): GoogleCloudAiplatformV1FeatureValueMetadata { return { ...data, generateTime: data["generateTime"] !== undefined ?
new Date(data["generateTime"]) : undefined, }; } /** * FeatureView is representation of values that the FeatureOnlineStore will * serve based on its syncConfig. */ export interface GoogleCloudAiplatformV1FeatureView { /** * Optional. Configures how data is supposed to be extracted from a BigQuery * source to be loaded onto the FeatureOnlineStore. */ bigQuerySource?: GoogleCloudAiplatformV1FeatureViewBigQuerySource; /** * Output only. Timestamp when this FeatureView was created. */ readonly createTime?: Date; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. Configures the features from a Feature Registry source that need * to be loaded onto the FeatureOnlineStore. */ featureRegistrySource?: GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource; /** * Optional. Configuration for index preparation for vector search. It * contains the required configurations to create an index from source data, * so that approximate nearest neighbor (a.k.a ANN) algorithms search can be * performed during online serving. */ indexConfig?: GoogleCloudAiplatformV1FeatureViewIndexConfig; /** * Optional. The labels with user-defined metadata to organize your * FeatureViews. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one * FeatureOnlineStore(System labels are excluded)." System reserved label keys * are prefixed with "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Identifier. Name of the FeatureView. Format: * `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ name?: string; /** * Optional. Configuration for FeatureView created under Optimized * FeatureOnlineStore. */ optimizedConfig?: GoogleCloudAiplatformV1FeatureViewOptimizedConfig; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Configures when data is to be synced/updated for this FeatureView. At the * end of the sync the latest featureValues for each entityId of this * FeatureView are made ready for online serving. */ syncConfig?: GoogleCloudAiplatformV1FeatureViewSyncConfig; /** * Output only. Timestamp when this FeatureView was last updated. */ readonly updateTime?: Date; /** * Optional. The Vertex RAG Source that the FeatureView is linked to. */ vertexRagSource?: GoogleCloudAiplatformV1FeatureViewVertexRagSource; } function serializeGoogleCloudAiplatformV1FeatureView(data: any): GoogleCloudAiplatformV1FeatureView { return { ...data, featureRegistrySource: data["featureRegistrySource"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data["featureRegistrySource"]) : undefined, indexConfig: data["indexConfig"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data["indexConfig"]) : undefined, vertexRagSource: data["vertexRagSource"] !== undefined ? 
serializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data["vertexRagSource"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureView(data: any): GoogleCloudAiplatformV1FeatureView { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, featureRegistrySource: data["featureRegistrySource"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data["featureRegistrySource"]) : undefined, indexConfig: data["indexConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data["indexConfig"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, vertexRagSource: data["vertexRagSource"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data["vertexRagSource"]) : undefined, }; } export interface GoogleCloudAiplatformV1FeatureViewBigQuerySource { /** * Required. Columns to construct entity_id / row keys. */ entityIdColumns?: string[]; /** * Required. The BigQuery view URI that will be materialized on each sync * trigger based on FeatureView.SyncConfig. */ uri?: string; } /** * Lookup key for a feature view. */ export interface GoogleCloudAiplatformV1FeatureViewDataKey { /** * The actual Entity ID will be composed from this struct. This should match * with the way ID is defined in the FeatureView spec. */ compositeKey?: GoogleCloudAiplatformV1FeatureViewDataKeyCompositeKey; /** * String key to use for lookup. */ key?: string; } /** * ID that is comprised from several parts (columns). */ export interface GoogleCloudAiplatformV1FeatureViewDataKeyCompositeKey { /** * Parts to construct Entity ID. Should match with the same ID columns as * defined in FeatureView in the same order. */ parts?: string[]; } /** * A Feature Registry source for features that need to be synced to Online * Store. */ export interface GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource { /** * Required. List of features that need to be synced to Online Store. */ featureGroups?: GoogleCloudAiplatformV1FeatureViewFeatureRegistrySourceFeatureGroup[]; /** * Optional. The project number of the parent project of the Feature Groups. */ projectNumber?: bigint; } function serializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data: any): GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource { return { ...data, projectNumber: data["projectNumber"] !== undefined ? String(data["projectNumber"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data: any): GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource { return { ...data, projectNumber: data["projectNumber"] !== undefined ? BigInt(data["projectNumber"]) : undefined, }; } /** * Features belonging to a single feature group that will be synced to Online * Store. */ export interface GoogleCloudAiplatformV1FeatureViewFeatureRegistrySourceFeatureGroup { /** * Required. Identifier of the feature group. */ featureGroupId?: string; /** * Required. Identifiers of features under the feature group. */ featureIds?: string[]; } /** * Configuration for vector indexing. */ export interface GoogleCloudAiplatformV1FeatureViewIndexConfig { /** * Optional. Configuration options for using brute force search, which simply * implements the standard linear search in the database for each query. It is * primarily meant for benchmarking and to generate the ground truth for * approximate search. 
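 * For illustration only: an index config for approximate vector search over an
 * `embedding` column might look like `{ embeddingColumn: "embedding",
 * embeddingDimension: 768, treeAhConfig: {} }`, whereas `{ bruteForceConfig: {} }`
 * selects exact linear search instead; the column name and dimension are
 * assumptions chosen for this example.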
*/ bruteForceConfig?: GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig; /** * Optional. Column of crowding. This column contains crowding attribute * which is a constraint on a neighbor list produced by * FeatureOnlineStoreService.SearchNearestEntities to diversify search * results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is * set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than * K entities of the same crowding attribute are returned in the response. */ crowdingColumn?: string; /** * Optional. The distance measure used in nearest neighbor search. */ distanceMeasureType?: | "DISTANCE_MEASURE_TYPE_UNSPECIFIED" | "SQUARED_L2_DISTANCE" | "COSINE_DISTANCE" | "DOT_PRODUCT_DISTANCE"; /** * Optional. Column of embedding. This column contains the source data to * create index for vector search. embedding_column must be set when using * vector search. */ embeddingColumn?: string; /** * Optional. The number of dimensions of the input embedding. */ embeddingDimension?: number; /** * Optional. Columns of features that're used to filter vector search * results. */ filterColumns?: string[]; /** * Optional. Configuration options for the tree-AH algorithm (Shallow tree + * Asymmetric Hashing). Please refer to this paper for more details: * https://arxiv.org/abs/1908.10396 */ treeAhConfig?: GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig; } function serializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfig { return { ...data, treeAhConfig: data["treeAhConfig"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data["treeAhConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfig { return { ...data, treeAhConfig: data["treeAhConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data["treeAhConfig"]) : undefined, }; } /** * Configuration options for using brute force search. */ export interface GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig { } /** * Configuration options for the tree-AH algorithm. */ export interface GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig { /** * Optional. Number of embeddings on each leaf node. The default value is * 1000 if not set. */ leafNodeEmbeddingCount?: bigint; } function serializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig { return { ...data, leafNodeEmbeddingCount: data["leafNodeEmbeddingCount"] !== undefined ? String(data["leafNodeEmbeddingCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig { return { ...data, leafNodeEmbeddingCount: data["leafNodeEmbeddingCount"] !== undefined ? BigInt(data["leafNodeEmbeddingCount"]) : undefined, }; } /** * Configuration for FeatureViews created in Optimized FeatureOnlineStore. */ export interface GoogleCloudAiplatformV1FeatureViewOptimizedConfig { /** * Optional. A description of resources that the FeatureView uses, which to * large degree are decided by Vertex AI, and optionally allows only a modest * additional configuration. If min_replica_count is not set, the default * value is 2. If max_replica_count is not set, the default value is 6. The * max allowed replica count is 1000. 
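 * For illustration only, a config that widens the serving range beyond the
 * defaults described above might look like
 * `{ automaticResources: { minReplicaCount: 4, maxReplicaCount: 10 } }`; the
 * replica counts are assumptions for this example, not recommendations.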
*/ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; } /** * FeatureViewSync is a representation of sync operation which copies data from * data source to Feature View in Online Store. */ export interface GoogleCloudAiplatformV1FeatureViewSync { /** * Output only. Time when this FeatureViewSync is created. Creation of a * FeatureViewSync means that the job is pending / waiting for sufficient * resources but may not have started the actual data transfer yet. */ readonly createTime?: Date; /** * Output only. Final status of the FeatureViewSync. */ readonly finalStatus?: GoogleRpcStatus; /** * Identifier. Name of the FeatureViewSync. Format: * `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}` */ name?: string; /** * Output only. Time when this FeatureViewSync is finished. */ readonly runTime?: GoogleTypeInterval; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Summary of the sync job. */ readonly syncSummary?: GoogleCloudAiplatformV1FeatureViewSyncSyncSummary; } /** * Configuration for Sync. Only one option is set. */ export interface GoogleCloudAiplatformV1FeatureViewSyncConfig { /** * Optional. If true, syncs the FeatureView in a continuous manner to Online * Store. */ continuous?: boolean; /** * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled * runs. To explicitly set a timezone to the cron tab, apply a prefix in the * cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". The * ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. * For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York * 1 * * * *". */ cron?: string; } /** * Summary from the Sync job. For continuous syncs, the summary is updated * periodically. For batch syncs, it gets updated on completion of the sync. */ export interface GoogleCloudAiplatformV1FeatureViewSyncSyncSummary { /** * Output only. Total number of rows synced. */ readonly rowSynced?: bigint; /** * Lower bound of the system time watermark for the sync job. This is only * set for continuously syncing feature views. */ systemWatermarkTime?: Date; /** * Output only. BigQuery slot milliseconds consumed for the sync job. */ readonly totalSlot?: bigint; } function serializeGoogleCloudAiplatformV1FeatureViewSyncSyncSummary(data: any): GoogleCloudAiplatformV1FeatureViewSyncSyncSummary { return { ...data, systemWatermarkTime: data["systemWatermarkTime"] !== undefined ? data["systemWatermarkTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewSyncSyncSummary(data: any): GoogleCloudAiplatformV1FeatureViewSyncSyncSummary { return { ...data, rowSynced: data["rowSynced"] !== undefined ? BigInt(data["rowSynced"]) : undefined, systemWatermarkTime: data["systemWatermarkTime"] !== undefined ? new Date(data["systemWatermarkTime"]) : undefined, totalSlot: data["totalSlot"] !== undefined ? BigInt(data["totalSlot"]) : undefined, }; } /** * A Vertex Rag source for features that need to be synced to Online Store. */ export interface GoogleCloudAiplatformV1FeatureViewVertexRagSource { /** * Optional. The RAG corpus id corresponding to this FeatureView. */ ragCorpusId?: bigint; /** * Required. The BigQuery view/table URI that will be materialized on each * manual sync trigger. 
The table/view is expected to have the following * columns and types at least: - `corpus_id` (STRING, NULLABLE/REQUIRED) - * `file_id` (STRING, NULLABLE/REQUIRED) - `chunk_id` (STRING, * NULLABLE/REQUIRED) - `chunk_data_type` (STRING, NULLABLE/REQUIRED) - * `chunk_data` (STRING, NULLABLE/REQUIRED) - `embeddings` (FLOAT, REPEATED) - * `file_original_uri` (STRING, NULLABLE/REQUIRED) */ uri?: string; } function serializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data: any): GoogleCloudAiplatformV1FeatureViewVertexRagSource { return { ...data, ragCorpusId: data["ragCorpusId"] !== undefined ? String(data["ragCorpusId"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data: any): GoogleCloudAiplatformV1FeatureViewVertexRagSource { return { ...data, ragCorpusId: data["ragCorpusId"] !== undefined ? BigInt(data["ragCorpusId"]) : undefined, }; } /** * Request message for FeatureOnlineStoreService.FetchFeatureValues. All the * features under the requested feature view will be returned. */ export interface GoogleCloudAiplatformV1FetchFeatureValuesRequest { /** * Optional. Response data format. If not set, * FeatureViewDataFormat.KEY_VALUE will be used. */ dataFormat?: | "FEATURE_VIEW_DATA_FORMAT_UNSPECIFIED" | "KEY_VALUE" | "PROTO_STRUCT"; /** * Optional. The request key to fetch feature values for. */ dataKey?: GoogleCloudAiplatformV1FeatureViewDataKey; } /** * Response message for FeatureOnlineStoreService.FetchFeatureValues */ export interface GoogleCloudAiplatformV1FetchFeatureValuesResponse { /** * The data key associated with this response. Will only be populated for * FeatureOnlineStoreService.StreamingFetchFeatureValues RPCs. */ dataKey?: GoogleCloudAiplatformV1FeatureViewDataKey; /** * Feature values in KeyValue format. */ keyValues?: GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList; /** * Feature values in proto Struct format. */ protoStruct?: { [key: string]: any }; } function serializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponse { return { ...data, keyValues: data["keyValues"] !== undefined ? serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data["keyValues"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponse { return { ...data, keyValues: data["keyValues"] !== undefined ? deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data["keyValues"]) : undefined, }; } /** * Response structure in the format of key (feature name) and (feature) value * pair. */ export interface GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList { /** * List of feature names and values. */ features?: GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair[]; } function serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList { return { ...data, features: data["features"] !== undefined ? 
data["features"].map((item: any) => (serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList { return { ...data, features: data["features"] !== undefined ? data["features"].map((item: any) => (deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(item))) : undefined, }; } /** * Feature name & value pair. */ export interface GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { /** * Feature short name. */ name?: string; /** * Feature value. */ value?: GoogleCloudAiplatformV1FeatureValue; } function serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { return { ...data, value: data["value"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { return { ...data, value: data["value"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, }; } /** * Request message for PredictionService.FetchPredictOperation. */ export interface GoogleCloudAiplatformV1FetchPredictOperationRequest { /** * Required. The server-assigned name for the operation. */ operationName?: string; } /** * URI based data. */ export interface GoogleCloudAiplatformV1FileData { /** * Required. URI. */ fileUri?: string; /** * Required. The IANA standard MIME type of the source data. */ mimeType?: string; } /** * RagFile status. */ export interface GoogleCloudAiplatformV1FileStatus { /** * Output only. Only when the `state` field is ERROR. */ readonly errorStatus?: string; /** * Output only. RagFile state. */ readonly state?: | "STATE_UNSPECIFIED" | "ACTIVE" | "ERROR"; } /** * Assigns input data to training, validation, and test sets based on the given * filters, data pieces not matched by any filter are ignored. Currently only * supported for Datasets containing DataItems. If any of the filters in this * message are to match nothing, then they can be set as '-' (the minus sign). * Supported only for unstructured Datasets. */ export interface GoogleCloudAiplatformV1FilterSplit { /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to test the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ testFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to train the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ trainingFilter?: string; /** * Required. A filter on DataItems of the Dataset. 
DataItems that match this * filter are used to validate the Model. A filter with the same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ validationFilter?: string; } /** * The request message for MatchService.FindNeighbors. */ export interface GoogleCloudAiplatformV1FindNeighborsRequest { /** * The ID of the DeployedIndex that will serve the request. This request is * sent to a specific IndexEndpoint, as per the IndexEndpoint.network. That * IndexEndpoint also has IndexEndpoint.deployed_indexes, and each such index * has a DeployedIndex.id field. The value of the field below must equal one * of the DeployedIndex.id fields of the IndexEndpoint that is being called * for this request. */ deployedIndexId?: string; /** * The list of queries. */ queries?: GoogleCloudAiplatformV1FindNeighborsRequestQuery[]; /** * If set to true, the full datapoints (including all vector values and * restricts) of the nearest neighbors are returned. Note that returning full * datapoints will significantly increase the latency and cost of the query. */ returnFullDatapoint?: boolean; } function serializeGoogleCloudAiplatformV1FindNeighborsRequest(data: any): GoogleCloudAiplatformV1FindNeighborsRequest { return { ...data, queries: data["queries"] !== undefined ? data["queries"].map((item: any) => (serializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsRequest(data: any): GoogleCloudAiplatformV1FindNeighborsRequest { return { ...data, queries: data["queries"] !== undefined ? data["queries"].map((item: any) => (deserializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(item))) : undefined, }; } /** * A query to find a number of the nearest neighbors (most similar vectors) of * a vector. */ export interface GoogleCloudAiplatformV1FindNeighborsRequestQuery { /** * The number of neighbors to find via approximate search before exact * reordering is performed. If not set, the default value from the ScaNN config * is used; if set, this value must be > 0. */ approximateNeighborCount?: number; /** * Required. The datapoint/vector whose nearest neighbors should be searched * for. */ datapoint?: GoogleCloudAiplatformV1IndexDatapoint; /** * The fraction of leaf nodes to search, which can be set at query time to * tune search performance. Increasing this value increases both search * accuracy and latency. The value should be between 0.0 and 1.0. If it is * not set or set to 0.0, the query uses the default value specified in * NearestNeighborSearchConfig.TreeAHConfig.fraction_leaf_nodes_to_search. */ fractionLeafNodesToSearchOverride?: number; /** * The number of nearest neighbors to be retrieved from the database for each * query. If not set, the default from the service configuration * (https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes#nearest-neighbor-search-config) * is used. */ neighborCount?: number; /** * Crowding is a constraint on a neighbor list produced by nearest neighbor * search requiring that no more than some value k' of the k neighbors * returned have the same value of crowding_attribute. It's used for improving * result diversity. This field is the maximum number of matches with the same * crowding tag. */ perCrowdingAttributeNeighborCount?: number; /** * Optional. 
Represents RRF algorithm that combines search results. */ rrf?: GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF; } function serializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(data: any): GoogleCloudAiplatformV1FindNeighborsRequestQuery { return { ...data, datapoint: data["datapoint"] !== undefined ? serializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(data: any): GoogleCloudAiplatformV1FindNeighborsRequestQuery { return { ...data, datapoint: data["datapoint"] !== undefined ? deserializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } /** * Parameters for RRF algorithm that combines search results. */ export interface GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF { /** * Required. Users can provide an alpha value to give more weight to dense vs * sparse results. For example, if the alpha is 0, we only return sparse and * if the alpha is 1, we only return dense. */ alpha?: number; } /** * The response message for MatchService.FindNeighbors. */ export interface GoogleCloudAiplatformV1FindNeighborsResponse { /** * The nearest neighbors of the query datapoints. */ nearestNeighbors?: GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors[]; } function serializeGoogleCloudAiplatformV1FindNeighborsResponse(data: any): GoogleCloudAiplatformV1FindNeighborsResponse { return { ...data, nearestNeighbors: data["nearestNeighbors"] !== undefined ? data["nearestNeighbors"].map((item: any) => (serializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsResponse(data: any): GoogleCloudAiplatformV1FindNeighborsResponse { return { ...data, nearestNeighbors: data["nearestNeighbors"] !== undefined ? data["nearestNeighbors"].map((item: any) => (deserializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(item))) : undefined, }; } /** * Nearest neighbors for one query. */ export interface GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors { /** * The ID of the query datapoint. */ id?: string; /** * All its neighbors. */ neighbors?: GoogleCloudAiplatformV1FindNeighborsResponseNeighbor[]; } function serializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (serializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (deserializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(item))) : undefined, }; } /** * A neighbor of the query vector. */ export interface GoogleCloudAiplatformV1FindNeighborsResponseNeighbor { /** * The datapoint of the neighbor. Note that full datapoints are returned only * when "return_full_datapoint" is set to true. Otherwise, only the * "datapoint_id" and "crowding_tag" fields are populated. */ datapoint?: GoogleCloudAiplatformV1IndexDatapoint; /** * The distance between the neighbor and the dense embedding query. */ distance?: number; /** * The distance between the neighbor and the query sparse_embedding. 
*/ sparseDistance?: number; } function serializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNeighbor { return { ...data, datapoint: data["datapoint"] !== undefined ? serializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNeighbor { return { ...data, datapoint: data["datapoint"] !== undefined ? deserializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } /** * Input for fluency metric. */ export interface GoogleCloudAiplatformV1FluencyInput { /** * Required. Fluency instance. */ instance?: GoogleCloudAiplatformV1FluencyInstance; /** * Required. Spec for fluency score metric. */ metricSpec?: GoogleCloudAiplatformV1FluencySpec; } /** * Spec for fluency instance. */ export interface GoogleCloudAiplatformV1FluencyInstance { /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for fluency result. */ export interface GoogleCloudAiplatformV1FluencyResult { /** * Output only. Confidence for fluency score. */ readonly confidence?: number; /** * Output only. Explanation for fluency score. */ readonly explanation?: string; /** * Output only. Fluency score. */ readonly score?: number; } /** * Spec for fluency score metric. */ export interface GoogleCloudAiplatformV1FluencySpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Assigns the input data to training, validation, and test sets as per the * given fractions. Any of `training_fraction`, `validation_fraction` and * `test_fraction` may optionally be provided, they must sum to up to 1. If the * provided ones sum to less than 1, the remainder is assigned to sets as * decided by Vertex AI. If none of the fractions are set, by default roughly * 80% of data is used for training, 10% for validation, and 10% for test. */ export interface GoogleCloudAiplatformV1FractionSplit { /** * The fraction of the input data that is to be used to evaluate the Model. */ testFraction?: number; /** * The fraction of the input data that is to be used to train the Model. */ trainingFraction?: number; /** * The fraction of the input data that is to be used to validate the Model. */ validationFraction?: number; } /** * Input for fulfillment metric. */ export interface GoogleCloudAiplatformV1FulfillmentInput { /** * Required. Fulfillment instance. */ instance?: GoogleCloudAiplatformV1FulfillmentInstance; /** * Required. Spec for fulfillment score metric. */ metricSpec?: GoogleCloudAiplatformV1FulfillmentSpec; } /** * Spec for fulfillment instance. */ export interface GoogleCloudAiplatformV1FulfillmentInstance { /** * Required. Inference instruction prompt to compare prediction with. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for fulfillment result. */ export interface GoogleCloudAiplatformV1FulfillmentResult { /** * Output only. Confidence for fulfillment score. */ readonly confidence?: number; /** * Output only. Explanation for fulfillment score. */ readonly explanation?: string; /** * Output only. Fulfillment score. */ readonly score?: number; } /** * Spec for fulfillment metric. */ export interface GoogleCloudAiplatformV1FulfillmentSpec { /** * Optional. Which version to use for evaluation. 
*/ version?: number; } /** * A predicted [FunctionCall] returned from the model that contains a string * representing the [FunctionDeclaration.name] and a structured JSON object * containing the parameters and their values. */ export interface GoogleCloudAiplatformV1FunctionCall { /** * Optional. Required. The function parameters and values in JSON object * format. See [FunctionDeclaration.parameters] for parameter details. */ args?: { [key: string]: any }; /** * Required. The name of the function to call. Matches * [FunctionDeclaration.name]. */ name?: string; } /** * Function calling config. */ export interface GoogleCloudAiplatformV1FunctionCallingConfig { /** * Optional. Function names to call. Only set when the Mode is ANY. Function * names should match [FunctionDeclaration.name]. With mode set to ANY, model * will predict a function call from the set of function names provided. */ allowedFunctionNames?: string[]; /** * Optional. Function calling mode. */ mode?: | "MODE_UNSPECIFIED" | "AUTO" | "ANY" | "NONE"; } /** * Structured representation of a function declaration as defined by the * [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included * in this declaration are the function name, description, parameters and * response type. This FunctionDeclaration is a representation of a block of * code that can be used as a `Tool` by the model and executed by the client. */ export interface GoogleCloudAiplatformV1FunctionDeclaration { /** * Optional. Description and purpose of the function. Model uses it to decide * how and whether to call the function. */ description?: string; /** * Required. The name of the function to call. Must start with a letter or an * underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, * with a maximum length of 64. */ name?: string; /** * Optional. Describes the parameters to this function in JSON Schema Object * format. Reflects the Open API 3.03 Parameter Object. string Key: the name * of the parameter. Parameter names are case sensitive. Schema Value: the * Schema defining the type used for the parameter. For function with no * parameters, this can be left unset. Parameter names must start with a * letter or an underscore and must only contain chars a-z, A-Z, 0-9, or * underscores with a maximum length of 64. Example with 1 required and 1 * optional parameter: type: OBJECT properties: param1: type: STRING param2: * type: INTEGER required: - param1 */ parameters?: GoogleCloudAiplatformV1Schema; /** * Optional. Describes the output from this function in JSON Schema format. * Reflects the Open API 3.03 Response Object. The Schema defines the type * used for the response value of the function. */ response?: GoogleCloudAiplatformV1Schema; } function serializeGoogleCloudAiplatformV1FunctionDeclaration(data: any): GoogleCloudAiplatformV1FunctionDeclaration { return { ...data, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Schema(data["parameters"]) : undefined, response: data["response"] !== undefined ? serializeGoogleCloudAiplatformV1Schema(data["response"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FunctionDeclaration(data: any): GoogleCloudAiplatformV1FunctionDeclaration { return { ...data, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Schema(data["parameters"]) : undefined, response: data["response"] !== undefined ? 
deserializeGoogleCloudAiplatformV1Schema(data["response"]) : undefined, }; } /** * The result output from a [FunctionCall] that contains a string representing * the [FunctionDeclaration.name] and a structured JSON object containing any * output from the function is used as context to the model. This should contain * the result of a [FunctionCall] made based on model prediction. */ export interface GoogleCloudAiplatformV1FunctionResponse { /** * Required. The name of the function to call. Matches * [FunctionDeclaration.name] and [FunctionCall.name]. */ name?: string; /** * Required. The function response in JSON object format. Use "output" key to * specify function output and "error" key to specify error details (if any). * If "output" and "error" keys are not specified, then whole "response" is * treated as function output. */ response?: { [key: string]: any }; } /** * The Google Cloud Storage location where the output is to be written to. */ export interface GoogleCloudAiplatformV1GcsDestination { /** * Required. Google Cloud Storage URI to output directory. If the uri doesn't * end with '/', a '/' will be automatically appended. The directory is * created if it doesn't exist. */ outputUriPrefix?: string; } /** * The Google Cloud Storage location for the input content. */ export interface GoogleCloudAiplatformV1GcsSource { /** * Required. Google Cloud Storage URI(-s) to the input file(s). May contain * wildcards. For more information on wildcards, see * https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. */ uris?: string[]; } /** * Request message for [PredictionService.GenerateContent]. */ export interface GoogleCloudAiplatformV1GenerateContentRequest { /** * Optional. The name of the cached content used as context to serve the * prediction. Note: only used in explicit caching, where users can have * control over caching (e.g. what content to cache) and enjoy guaranteed cost * savings. Format: * `projects/{project}/locations/{location}/cachedContents/{cachedContent}` */ cachedContent?: string; /** * Required. The content of the current conversation with the model. For * single-turn queries, this is a single instance. For multi-turn queries, * this is a repeated field that contains conversation history + latest * request. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. Generation config. */ generationConfig?: GoogleCloudAiplatformV1GenerationConfig; /** * Optional. The labels with user-defined metadata for the request. It is * used for billing and reporting only. Label keys and values can be no longer * than 63 characters (Unicode codepoints) and can only contain lowercase * letters, numeric characters, underscores, and dashes. International * characters are allowed. Label values are optional. Label keys must start * with a letter. */ labels?: { [key: string]: string }; /** * Optional. Per request settings for blocking unsafe content. Enforced on * GenerateContentResponse.candidates. */ safetySettings?: GoogleCloudAiplatformV1SafetySetting[]; /** * Optional. The user provided system instructions for the model. Note: only * text should be used in parts and content in each part will be in a separate * paragraph. */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Optional. Tool config. This config is shared for all tools provided in the * request. */ toolConfig?: GoogleCloudAiplatformV1ToolConfig; /** * Optional. A list of `Tools` the model may use to generate the next * response. 
A `Tool` is a piece of code that enables the system to interact * with external systems to perform an action, or set of actions, outside of * knowledge and scope of the model. */ tools?: GoogleCloudAiplatformV1Tool[]; } function serializeGoogleCloudAiplatformV1GenerateContentRequest(data: any): GoogleCloudAiplatformV1GenerateContentRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? serializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1GenerateContentRequest(data: any): GoogleCloudAiplatformV1GenerateContentRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } /** * Response message for [PredictionService.GenerateContent]. */ export interface GoogleCloudAiplatformV1GenerateContentResponse { /** * Output only. Generated candidates. */ readonly candidates?: GoogleCloudAiplatformV1Candidate[]; /** * Output only. The model version used to generate the response. */ readonly modelVersion?: string; /** * Output only. Content filter results for a prompt sent in the request. * Note: Sent only in the first stream chunk. Only happens when no candidates * were generated due to content violations. */ readonly promptFeedback?: GoogleCloudAiplatformV1GenerateContentResponsePromptFeedback; /** * Usage metadata about the response(s). */ usageMetadata?: GoogleCloudAiplatformV1GenerateContentResponseUsageMetadata; } /** * Content filter results for a prompt sent in the request. */ export interface GoogleCloudAiplatformV1GenerateContentResponsePromptFeedback { /** * Output only. Blocked reason. */ readonly blockReason?: | "BLOCKED_REASON_UNSPECIFIED" | "SAFETY" | "OTHER" | "BLOCKLIST" | "PROHIBITED_CONTENT"; /** * Output only. A readable block reason message. */ readonly blockReasonMessage?: string; /** * Output only. Safety ratings. */ readonly safetyRatings?: GoogleCloudAiplatformV1SafetyRating[]; } /** * Usage metadata about response(s). */ export interface GoogleCloudAiplatformV1GenerateContentResponseUsageMetadata { /** * Output only. Number of tokens in the cached part in the input (the cached * content). */ readonly cachedContentTokenCount?: number; /** * Number of tokens in the response(s). */ candidatesTokenCount?: number; /** * Number of tokens in the request. When `cached_content` is set, this is * still the total effective prompt size meaning this includes the number of * tokens in the cached content. */ promptTokenCount?: number; /** * Total token count for prompt and response candidates. */ totalTokenCount?: number; } /** * Generation config. 
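 *
 * The helper below is an illustrative sketch, not part of the generated API
 * surface: it fills in a config that requests a single, fairly deterministic
 * JSON candidate and routes the request automatically. The concrete values
 * are assumptions chosen for the example, not service defaults.
 */
function _exampleGenerationConfig(): GoogleCloudAiplatformV1GenerationConfig {
  return {
    // One candidate, low temperature for more deterministic output.
    candidateCount: 1,
    temperature: 0.2,
    maxOutputTokens: 256,
    // Ask for JSON; per the field docs below, the model should also be
    // prompted (or given a responseSchema) to produce the expected shape.
    responseMimeType: "application/json",
    // Let the pretrained router pick a model, biased toward quality.
    routingConfig: {
      autoMode: { modelRoutingPreference: "PRIORITIZE_QUALITY" },
    },
  };
}

/**
 * Generation config.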
*/ export interface GoogleCloudAiplatformV1GenerationConfig { /** * Optional. If enabled, audio timestamp will be included in the request to * the model. */ audioTimestamp?: boolean; /** * Optional. Number of candidates to generate. */ candidateCount?: number; /** * Optional. Frequency penalties. */ frequencyPenalty?: number; /** * Optional. Logit probabilities. */ logprobs?: number; /** * Optional. The maximum number of output tokens to generate per message. */ maxOutputTokens?: number; /** * Optional. Positive penalties. */ presencePenalty?: number; /** * Optional. If true, export the logprobs results in response. */ responseLogprobs?: boolean; /** * Optional. Output response mimetype of the generated candidate text. * Supported mimetype: - `text/plain`: (default) Text output. - * `application/json`: JSON response in the candidates. The model needs to be * prompted to output the appropriate response type, otherwise the behavior is * undefined. This is a preview feature. */ responseMimeType?: string; /** * Optional. The `Schema` object allows the definition of input and output * data types. These types can be objects, but also primitives and arrays. * Represents a select subset of an [OpenAPI 3.0 schema * object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible * response_mime_type must also be set. Compatible mimetypes: * `application/json`: Schema for JSON response. */ responseSchema?: GoogleCloudAiplatformV1Schema; /** * Optional. Routing configuration. */ routingConfig?: GoogleCloudAiplatformV1GenerationConfigRoutingConfig; /** * Optional. Seed. */ seed?: number; /** * Optional. Stop sequences. */ stopSequences?: string[]; /** * Optional. Controls the randomness of predictions. */ temperature?: number; /** * Optional. If specified, top-k sampling will be used. */ topK?: number; /** * Optional. If specified, nucleus sampling will be used. */ topP?: number; } function serializeGoogleCloudAiplatformV1GenerationConfig(data: any): GoogleCloudAiplatformV1GenerationConfig { return { ...data, responseSchema: data["responseSchema"] !== undefined ? serializeGoogleCloudAiplatformV1Schema(data["responseSchema"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1GenerationConfig(data: any): GoogleCloudAiplatformV1GenerationConfig { return { ...data, responseSchema: data["responseSchema"] !== undefined ? deserializeGoogleCloudAiplatformV1Schema(data["responseSchema"]) : undefined, }; } /** * The configuration for routing the request to a specific model. */ export interface GoogleCloudAiplatformV1GenerationConfigRoutingConfig { /** * Automated routing. */ autoMode?: GoogleCloudAiplatformV1GenerationConfigRoutingConfigAutoRoutingMode; /** * Manual routing. */ manualMode?: GoogleCloudAiplatformV1GenerationConfigRoutingConfigManualRoutingMode; } /** * When automated routing is specified, the routing will be determined by the * pretrained routing model and customer provided model routing preference. */ export interface GoogleCloudAiplatformV1GenerationConfigRoutingConfigAutoRoutingMode { /** * The model routing preference. */ modelRoutingPreference?: | "UNKNOWN" | "PRIORITIZE_QUALITY" | "BALANCED" | "PRIORITIZE_COST"; } /** * When manual routing is set, the specified model will be used directly. */ export interface GoogleCloudAiplatformV1GenerationConfigRoutingConfigManualRoutingMode { /** * The model name to use. Only the public LLM models are accepted. e.g. * 'gemini-1.5-pro-001'. */ modelName?: string; } /** * Generic Metadata shared by all operations. 
*/ export interface GoogleCloudAiplatformV1GenericOperationMetadata { /** * Output only. Time when the operation was created. */ readonly createTime?: Date; /** * Output only. Partial failures encountered. E.g. single files that couldn't * be read. This field should never exceed 20 entries. Status details field * will contain standard Google Cloud error details. */ readonly partialFailures?: GoogleRpcStatus[]; /** * Output only. Time when the operation was updated for the last time. If the * operation has finished (successfully or not), this is the finish time. */ readonly updateTime?: Date; } /** * Contains information about the source of the models generated from * Generative AI Studio. */ export interface GoogleCloudAiplatformV1GenieSource { /** * Required. The public base model URI. */ baseModelUri?: string; } /** * The Google Drive location for the input content. */ export interface GoogleCloudAiplatformV1GoogleDriveSource { /** * Required. Google Drive resource IDs. */ resourceIds?: GoogleCloudAiplatformV1GoogleDriveSourceResourceId[]; } /** * The type and ID of the Google Drive resource. */ export interface GoogleCloudAiplatformV1GoogleDriveSourceResourceId { /** * Required. The ID of the Google Drive resource. */ resourceId?: string; /** * Required. The type of the Google Drive resource. */ resourceType?: | "RESOURCE_TYPE_UNSPECIFIED" | "RESOURCE_TYPE_FILE" | "RESOURCE_TYPE_FOLDER"; } /** * Tool to retrieve public web data for grounding, powered by Google. */ export interface GoogleCloudAiplatformV1GoogleSearchRetrieval { /** * Specifies the dynamic retrieval configuration for the given source. */ dynamicRetrievalConfig?: GoogleCloudAiplatformV1DynamicRetrievalConfig; } /** * Input for groundedness metric. */ export interface GoogleCloudAiplatformV1GroundednessInput { /** * Required. Groundedness instance. */ instance?: GoogleCloudAiplatformV1GroundednessInstance; /** * Required. Spec for groundedness metric. */ metricSpec?: GoogleCloudAiplatformV1GroundednessSpec; } /** * Spec for groundedness instance. */ export interface GoogleCloudAiplatformV1GroundednessInstance { /** * Required. Background information provided in context used to compare * against the prediction. */ context?: string; /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for groundedness result. */ export interface GoogleCloudAiplatformV1GroundednessResult { /** * Output only. Confidence for groundedness score. */ readonly confidence?: number; /** * Output only. Explanation for groundedness score. */ readonly explanation?: string; /** * Output only. Groundedness score. */ readonly score?: number; } /** * Spec for groundedness metric. */ export interface GoogleCloudAiplatformV1GroundednessSpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Grounding chunk. */ export interface GoogleCloudAiplatformV1GroundingChunk { /** * Grounding chunk from context retrieved by the retrieval tools. */ retrievedContext?: GoogleCloudAiplatformV1GroundingChunkRetrievedContext; /** * Grounding chunk from the web. */ web?: GoogleCloudAiplatformV1GroundingChunkWeb; } /** * Chunk from context retrieved by the retrieval tools. */ export interface GoogleCloudAiplatformV1GroundingChunkRetrievedContext { /** * Text of the attribution. */ text?: string; /** * Title of the attribution. */ title?: string; /** * URI reference of the attribution. */ uri?: string; } /** * Chunk from the web. */ export interface GoogleCloudAiplatformV1GroundingChunkWeb { /** * Title of the chunk. 
*/ title?: string; /** * URI reference of the chunk. */ uri?: string; } /** * Metadata returned to client when grounding is enabled. */ export interface GoogleCloudAiplatformV1GroundingMetadata { /** * List of supporting references retrieved from specified grounding source. */ groundingChunks?: GoogleCloudAiplatformV1GroundingChunk[]; /** * Optional. List of grounding support. */ groundingSupports?: GoogleCloudAiplatformV1GroundingSupport[]; /** * Optional. Output only. Retrieval metadata. */ readonly retrievalMetadata?: GoogleCloudAiplatformV1RetrievalMetadata; /** * Optional. Google search entry for the following-up web searches. */ searchEntryPoint?: GoogleCloudAiplatformV1SearchEntryPoint; /** * Optional. Web search queries for the following-up web search. */ webSearchQueries?: string[]; } function serializeGoogleCloudAiplatformV1GroundingMetadata(data: any): GoogleCloudAiplatformV1GroundingMetadata { return { ...data, searchEntryPoint: data["searchEntryPoint"] !== undefined ? serializeGoogleCloudAiplatformV1SearchEntryPoint(data["searchEntryPoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1GroundingMetadata(data: any): GoogleCloudAiplatformV1GroundingMetadata { return { ...data, searchEntryPoint: data["searchEntryPoint"] !== undefined ? deserializeGoogleCloudAiplatformV1SearchEntryPoint(data["searchEntryPoint"]) : undefined, }; } /** * Grounding support. */ export interface GoogleCloudAiplatformV1GroundingSupport { /** * Confidence score of the support references. Ranges from 0 to 1. 1 is the * most confident. This list must have the same size as the * grounding_chunk_indices. */ confidenceScores?: number[]; /** * A list of indices (into 'grounding_chunk') specifying the citations * associated with the claim. For instance [1,3,4] means that * grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the * retrieved content attributed to the claim. */ groundingChunkIndices?: number[]; /** * Segment of the content this support belongs to. */ segment?: GoogleCloudAiplatformV1Segment; } /** * Represents a HyperparameterTuningJob. A HyperparameterTuningJob has a Study * specification and multiple CustomJobs with identical CustomJob specification. */ export interface GoogleCloudAiplatformV1HyperparameterTuningJob { /** * Output only. Time when the HyperparameterTuningJob was created. */ readonly createTime?: Date; /** * Required. The display name of the HyperparameterTuningJob. The name can be * up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key options for a HyperparameterTuningJob. If * this is set, then all resources created by the HyperparameterTuningJob will * be encrypted with the provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the HyperparameterTuningJob entered any of the * following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, * `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is JOB_STATE_FAILED or * JOB_STATE_CANCELLED. */ readonly error?: GoogleRpcStatus; /** * The labels with user-defined metadata to organize * HyperparameterTuningJobs. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. 
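 * For illustration only, a label map such as `{ "team": "vision", "env": "dev" }`
 * satisfies these constraints; the keys and values are assumptions for this
 * example.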
*/ labels?: { [key: string]: string }; /** * The number of failed Trials that need to be seen before failing the * HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials * must fail before the whole job fails. */ maxFailedTrialCount?: number; /** * Required. The desired total number of Trials. */ maxTrialCount?: number; /** * Output only. Resource name of the HyperparameterTuningJob. */ readonly name?: string; /** * Required. The desired number of Trials to run in parallel. */ parallelTrialCount?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the HyperparameterTuningJob for the first time * entered the `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Required. Study configuration of the HyperparameterTuningJob. */ studySpec?: GoogleCloudAiplatformV1StudySpec; /** * Required. The spec of a trial job. The same spec applies to the CustomJobs * created in all the trials. */ trialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; /** * Output only. Trials of the HyperparameterTuningJob. */ readonly trials?: GoogleCloudAiplatformV1Trial[]; /** * Output only. Time when the HyperparameterTuningJob was most recently * updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1HyperparameterTuningJob(data: any): GoogleCloudAiplatformV1HyperparameterTuningJob { return { ...data, studySpec: data["studySpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, trialJobSpec: data["trialJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["trialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(data: any): GoogleCloudAiplatformV1HyperparameterTuningJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, studySpec: data["studySpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, trialJobSpec: data["trialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["trialJobSpec"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Matcher for Features of an EntityType by Feature ID. */ export interface GoogleCloudAiplatformV1IdMatcher { /** * Required. The following are accepted as `ids`: * A single-element list * containing only `*`, which selects all Features in the target EntityType, * or * A list containing only Feature IDs, which selects only Features with * those IDs in the target EntityType. */ ids?: string[]; } /** * Describes the location from where we import data into a Dataset, together * with the labels that will be applied to the DataItems and the Annotations. */ export interface GoogleCloudAiplatformV1ImportDataConfig { /** * Labels that will be applied to newly imported Annotations. 
If two * Annotations are identical, one of them will be deduped. Two Annotations are * considered identical if their payload, payload_schema_uri and all of their * labels are the same. These labels will be overridden by Annotation labels * specified inside the index file referenced by import_schema_uri, e.g. a * JSONL file. */ annotationLabels?: { [key: string]: string }; /** * Labels that will be applied to newly imported DataItems. If a DataItem * identical to one being imported already exists in the Dataset, then these * labels will be appended to those of the already existing one, and if a label * with an identical key was imported before, the old label value will be * overwritten. If two DataItems are identical in the same import data * operation, the labels will be combined, and if a key collision happens in * this case, one of the values will be picked randomly. Two DataItems are * considered identical if their content bytes are identical (e.g. image bytes * or PDF bytes). These labels will be overridden by Annotation labels * specified inside the index file referenced by import_schema_uri, e.g. a * JSONL file. */ dataItemLabels?: { [key: string]: string }; /** * The Google Cloud Storage location for the input content. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Required. Points to a YAML file stored on Google Cloud Storage describing * the import format. Validation will be done against the schema. The schema * is defined as an [OpenAPI 3.0.2 Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ importSchemaUri?: string; } /** * Runtime operation information for DatasetService.ImportData. */ export interface GoogleCloudAiplatformV1ImportDataOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for DatasetService.ImportData. */ export interface GoogleCloudAiplatformV1ImportDataRequest { /** * Required. The desired input locations. The contents of all input locations * will be imported in one batch. */ importConfigs?: GoogleCloudAiplatformV1ImportDataConfig[]; } /** * Response message for DatasetService.ImportData. */ export interface GoogleCloudAiplatformV1ImportDataResponse { } /** * Details of operations that import Feature values. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata { /** * List of ImportFeatureValues operations running under a single EntityType * that are blocking this operation. */ blockingOperationIds?: bigint[]; /** * Operation metadata for Featurestore import Feature values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Number of entities that have been imported by the operation. */ importedEntityCount?: bigint; /** * Number of Feature values that have been imported by the operation. */ importedFeatureValueCount?: bigint; /** * The number of rows in the input source that weren't imported due to either: * * Not having any featureValues. * Having a null entityId. * Having a null * timestamp. * Not being parsable (applicable for CSV sources). */ invalidRowCount?: bigint; /** * The source URI from which Feature values are imported. */ sourceUris?: string[]; /** * The number of rows that weren't ingested due to having timestamps outside * the retention boundary. 
*/ timestampOutsideRetentionRowsCount?: bigint; } function serializeGoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata(data: any): GoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata { return { ...data, blockingOperationIds: data["blockingOperationIds"] !== undefined ? data["blockingOperationIds"].map((item: any) => (String(item))) : undefined, importedEntityCount: data["importedEntityCount"] !== undefined ? String(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? String(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? String(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? String(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata(data: any): GoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata { return { ...data, blockingOperationIds: data["blockingOperationIds"] !== undefined ? data["blockingOperationIds"].map((item: any) => (BigInt(item))) : undefined, importedEntityCount: data["importedEntityCount"] !== undefined ? BigInt(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? BigInt(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? BigInt(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? BigInt(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } /** * Request message for FeaturestoreService.ImportFeatureValues. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesRequest { avroSource?: GoogleCloudAiplatformV1AvroSource; bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; csvSource?: GoogleCloudAiplatformV1CsvSource; /** * If true, API doesn't start ingestion analysis pipeline. */ disableIngestionAnalysis?: boolean; /** * If set, data will not be imported for online serving. This is typically * used for backfilling, where Feature generation timestamps are not in the * timestamp range needed for online serving. */ disableOnlineServing?: boolean; /** * Source column that holds entity IDs. If not provided, entity IDs are * extracted from the column named entity_id. */ entityIdField?: string; /** * Required. Specifications defining which Feature values to import from the * entity. The request fails if no feature_specs are provided, and having * multiple feature_specs for one Feature is not allowed. */ featureSpecs?: GoogleCloudAiplatformV1ImportFeatureValuesRequestFeatureSpec[]; /** * Single Feature timestamp for all entities being imported. The timestamp * must not have higher than millisecond precision. */ featureTime?: Date; /** * Source column that holds the Feature timestamp for all Feature values in * each entity. */ featureTimeField?: string; /** * Specifies the number of workers that are used to write data to the * Featurestore. Consider the online serving capacity that you require to * achieve the desired import throughput without interfering with online * serving. The value must be positive, and less than or equal to 100. If not * set, defaults to using 1 worker. The low count ensures minimal impact on * online serving performance. 
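 * For illustration only, a historical backfill that should not affect online
 * serving might combine `disableOnlineServing: true`, `featureTimeField:
 * "feature_timestamp"` and a modestly raised `workerCount` such as 10; the
 * column name and worker count are assumptions for this example.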
*/ workerCount?: number; } function serializeGoogleCloudAiplatformV1ImportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ImportFeatureValuesRequest { return { ...data, featureTime: data["featureTime"] !== undefined ? data["featureTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ImportFeatureValuesRequest { return { ...data, featureTime: data["featureTime"] !== undefined ? new Date(data["featureTime"]) : undefined, }; } /** * Defines the Feature value(s) to import. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesRequestFeatureSpec { /** * Required. ID of the Feature to import values of. This Feature must exist * in the target EntityType, or the request will fail. */ id?: string; /** * Source column to get the Feature values from. If not set, uses the column * with the same name as the Feature ID. */ sourceField?: string; } /** * Response message for FeaturestoreService.ImportFeatureValues. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesResponse { /** * Number of entities that have been imported by the operation. */ importedEntityCount?: bigint; /** * Number of Feature values that have been imported by the operation. */ importedFeatureValueCount?: bigint; /** * The number of rows in input source that weren't imported due to either * * Not having any featureValues. * Having a null entityId. * Having a null * timestamp. * Not being parsable (applicable for CSV sources). */ invalidRowCount?: bigint; /** * The number rows that weren't ingested due to having feature timestamps * outside the retention boundary. */ timestampOutsideRetentionRowsCount?: bigint; } function serializeGoogleCloudAiplatformV1ImportFeatureValuesResponse(data: any): GoogleCloudAiplatformV1ImportFeatureValuesResponse { return { ...data, importedEntityCount: data["importedEntityCount"] !== undefined ? String(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? String(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? String(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? String(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportFeatureValuesResponse(data: any): GoogleCloudAiplatformV1ImportFeatureValuesResponse { return { ...data, importedEntityCount: data["importedEntityCount"] !== undefined ? BigInt(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? BigInt(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? BigInt(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? BigInt(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } /** * Request message for ModelService.ImportModelEvaluation */ export interface GoogleCloudAiplatformV1ImportModelEvaluationRequest { /** * Required. Model evaluation resource to be imported. */ modelEvaluation?: GoogleCloudAiplatformV1ModelEvaluation; } /** * Config for importing RagFiles. */ export interface GoogleCloudAiplatformV1ImportRagFilesConfig { /** * Google Cloud Storage location. Supports importing individual files as well * as entire Google Cloud Storage directories. 
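 *
 * As a sketch (bucket and directory names are placeholders, and the
 * GcsSource shape is assumed from the rest of this module), a config that
 * imports an entire directory and is wrapped into an ImportRagFilesRequest
 * looks like:
 *
 * ```ts
 * const config: GoogleCloudAiplatformV1ImportRagFilesConfig = {
 *   gcsSource: { uris: ["gs://my-bucket/knowledge-base/my_directory"] },
 *   maxEmbeddingRequestsPerMin: 1000,
 * };
 * const req: GoogleCloudAiplatformV1ImportRagFilesRequest = {
 *   importRagFilesConfig: config,
 * };
 * serializeGoogleCloudAiplatformV1ImportRagFilesRequest(req);
 * ```
 *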
Sample formats: - * `gs://bucket_name/my_directory/object_name/my_file.txt` - * `gs://bucket_name/my_directory` */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Google Drive location. Supports importing individual files as well as * Google Drive folders. */ googleDriveSource?: GoogleCloudAiplatformV1GoogleDriveSource; /** * Jira queries with their corresponding authentication. */ jiraSource?: GoogleCloudAiplatformV1JiraSource; /** * Optional. The max number of queries per minute that this job is allowed to * make to the embedding model specified on the corpus. This value is specific * to this job and not shared across other import jobs. Consult the Quotas * page on the project to set an appropriate value here. If unspecified, a * default value of 1,000 QPM would be used. */ maxEmbeddingRequestsPerMin?: number; /** * The BigQuery destination to write partial failures to. It should be a * bigquery table resource name (e.g. "bq://projectId.bqDatasetId.bqTableId"). * The dataset must exist. If the table does not exist, it will be created * with the expected schema. If the table exists, the schema will be validated * and data will be added to this existing table. Deprecated. Prefer to use * `import_result_bq_sink`. */ partialFailureBigquerySink?: GoogleCloudAiplatformV1BigQueryDestination; /** * The Cloud Storage path to write partial failures to. Deprecated. Prefer to * use `import_result_gcs_sink`. */ partialFailureGcsSink?: GoogleCloudAiplatformV1GcsDestination; /** * Specifies the transformation config for RagFiles. */ ragFileTransformationConfig?: GoogleCloudAiplatformV1RagFileTransformationConfig; /** * Slack channels with their corresponding access tokens. */ slackSource?: GoogleCloudAiplatformV1SlackSource; } function serializeGoogleCloudAiplatformV1ImportRagFilesConfig(data: any): GoogleCloudAiplatformV1ImportRagFilesConfig { return { ...data, slackSource: data["slackSource"] !== undefined ? serializeGoogleCloudAiplatformV1SlackSource(data["slackSource"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportRagFilesConfig(data: any): GoogleCloudAiplatformV1ImportRagFilesConfig { return { ...data, slackSource: data["slackSource"] !== undefined ? deserializeGoogleCloudAiplatformV1SlackSource(data["slackSource"]) : undefined, }; } /** * Request message for VertexRagDataService.ImportRagFiles. */ export interface GoogleCloudAiplatformV1ImportRagFilesRequest { /** * Required. The config for the RagFiles to be synced and imported into the * RagCorpus. VertexRagDataService.ImportRagFiles. */ importRagFilesConfig?: GoogleCloudAiplatformV1ImportRagFilesConfig; } function serializeGoogleCloudAiplatformV1ImportRagFilesRequest(data: any): GoogleCloudAiplatformV1ImportRagFilesRequest { return { ...data, importRagFilesConfig: data["importRagFilesConfig"] !== undefined ? serializeGoogleCloudAiplatformV1ImportRagFilesConfig(data["importRagFilesConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportRagFilesRequest(data: any): GoogleCloudAiplatformV1ImportRagFilesRequest { return { ...data, importRagFilesConfig: data["importRagFilesConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ImportRagFilesConfig(data["importRagFilesConfig"]) : undefined, }; } /** * A representation of a collection of database items organized in a way that * allows for approximate nearest neighbor (a.k.a ANN) algorithms search. */ export interface GoogleCloudAiplatformV1Index { /** * Output only. Timestamp when this Index was created. 
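 *
 * `createTime` (like the other output-only fields) is set by the service; a
 * client-side Index body only supplies the mutable fields. A minimal sketch,
 * with an invented display name and label:
 *
 * ```ts
 * const index: GoogleCloudAiplatformV1Index = {
 *   displayName: "products-ann-index",
 *   description: "ANN index over product embeddings",
 *   indexUpdateMethod: "STREAM_UPDATE",
 *   labels: { team: "search" },
 * };
 * ```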
*/ readonly createTime?: Date; /** * Output only. The pointers to DeployedIndexes created from this Index. An * Index can be only deleted if all its DeployedIndexes had been undeployed * first. */ readonly deployedIndexes?: GoogleCloudAiplatformV1DeployedIndexRef[]; /** * The description of the Index. */ description?: string; /** * Required. The display name of the Index. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Immutable. Customer-managed encryption key spec for an Index. If set, this * Index and all sub-resources of this Index will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * Output only. Stats of the index resource. */ readonly indexStats?: GoogleCloudAiplatformV1IndexStats; /** * Immutable. The update method to use with this Index. If not set, * BATCH_UPDATE will be used by default. */ indexUpdateMethod?: | "INDEX_UPDATE_METHOD_UNSPECIFIED" | "BATCH_UPDATE" | "STREAM_UPDATE"; /** * The labels with user-defined metadata to organize your Indexes. Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * An additional information about the Index; the schema of the metadata can * be found in metadata_schema. */ metadata?: any; /** * Immutable. Points to a YAML file stored on Google Cloud Storage describing * additional information about the Index, that is specific to it. Unset if * the Index does not have any additional information. The schema is defined * as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * Note: The URI given on output will be immutable and probably different, * including the URI scheme, than the one given on input. The output URI will * point to a location where the user only has a read access. */ metadataSchemaUri?: string; /** * Output only. The resource name of the Index. */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this Index was most recently updated. This * also includes any update to the contents of the Index. Note that Operations * working on this Index may have their * Operations.metadata.generic_metadata.update_time a little after the value * of this timestamp, yet that does not mean their results are not already * reflected in the Index. Result of any successfully completed Operation on * the Index is reflected in it. */ readonly updateTime?: Date; } /** * A datapoint of Index. */ export interface GoogleCloudAiplatformV1IndexDatapoint { /** * Optional. CrowdingTag of the datapoint, the number of neighbors to return * in each crowding can be configured during query. */ crowdingTag?: GoogleCloudAiplatformV1IndexDatapointCrowdingTag; /** * Required. Unique identifier of the datapoint. */ datapointId?: string; /** * Required. Feature embedding vector for dense index. An array of numbers * with the length of [NearestNeighborSearchConfig.dimensions]. */ featureVector?: number[]; /** * Optional. 
List of Restrict of the datapoint, used to perform "restricted * searches" where boolean rule are used to filter the subset of the database * eligible for matching. This uses numeric comparisons. */ numericRestricts?: GoogleCloudAiplatformV1IndexDatapointNumericRestriction[]; /** * Optional. List of Restrict of the datapoint, used to perform "restricted * searches" where boolean rule are used to filter the subset of the database * eligible for matching. This uses categorical tokens. See: * https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */ restricts?: GoogleCloudAiplatformV1IndexDatapointRestriction[]; /** * Optional. Feature embedding vector for sparse index. */ sparseEmbedding?: GoogleCloudAiplatformV1IndexDatapointSparseEmbedding; } function serializeGoogleCloudAiplatformV1IndexDatapoint(data: any): GoogleCloudAiplatformV1IndexDatapoint { return { ...data, numericRestricts: data["numericRestricts"] !== undefined ? data["numericRestricts"].map((item: any) => (serializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(item))) : undefined, sparseEmbedding: data["sparseEmbedding"] !== undefined ? serializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data["sparseEmbedding"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1IndexDatapoint(data: any): GoogleCloudAiplatformV1IndexDatapoint { return { ...data, numericRestricts: data["numericRestricts"] !== undefined ? data["numericRestricts"].map((item: any) => (deserializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(item))) : undefined, sparseEmbedding: data["sparseEmbedding"] !== undefined ? deserializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data["sparseEmbedding"]) : undefined, }; } /** * Crowding tag is a constraint on a neighbor list produced by nearest neighbor * search requiring that no more than some value k' of the k neighbors returned * have the same value of crowding_attribute. */ export interface GoogleCloudAiplatformV1IndexDatapointCrowdingTag { /** * The attribute value used for crowding. The maximum number of neighbors to * return per crowding attribute value (per_crowding_attribute_num_neighbors) * is configured per-query. This field is ignored if * per_crowding_attribute_num_neighbors is larger than the total number of * neighbors to return for a given query. */ crowdingAttribute?: string; } /** * This field allows restricts to be based on numeric comparisons rather than * categorical tokens. */ export interface GoogleCloudAiplatformV1IndexDatapointNumericRestriction { /** * The namespace of this restriction. e.g.: cost. */ namespace?: string; /** * This MUST be specified for queries and must NOT be specified for * datapoints. */ op?: | "OPERATOR_UNSPECIFIED" | "LESS" | "LESS_EQUAL" | "EQUAL" | "GREATER_EQUAL" | "GREATER" | "NOT_EQUAL"; /** * Represents 64 bit float. */ valueDouble?: number; /** * Represents 32 bit float. */ valueFloat?: number; /** * Represents 64 bit integer. */ valueInt?: bigint; } function serializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(data: any): GoogleCloudAiplatformV1IndexDatapointNumericRestriction { return { ...data, valueInt: data["valueInt"] !== undefined ? String(data["valueInt"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(data: any): GoogleCloudAiplatformV1IndexDatapointNumericRestriction { return { ...data, valueInt: data["valueInt"] !== undefined ? 
BigInt(data["valueInt"]) : undefined, }; } /** * Restriction of a datapoint which describe its attributes(tokens) from each * of several attribute categories(namespaces). */ export interface GoogleCloudAiplatformV1IndexDatapointRestriction { /** * The attributes to allow in this namespace. e.g.: 'red' */ allowList?: string[]; /** * The attributes to deny in this namespace. e.g.: 'blue' */ denyList?: string[]; /** * The namespace of this restriction. e.g.: color. */ namespace?: string; } /** * Feature embedding vector for sparse index. An array of numbers whose values * are located in the specified dimensions. */ export interface GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { /** * Required. The list of indexes for the embedding values of the sparse * vector. */ dimensions?: bigint[]; /** * Required. The list of embedding values of the sparse vector. */ values?: number[]; } function serializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data: any): GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { return { ...data, dimensions: data["dimensions"] !== undefined ? data["dimensions"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data: any): GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { return { ...data, dimensions: data["dimensions"] !== undefined ? data["dimensions"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Indexes are deployed into it. An IndexEndpoint can have multiple * DeployedIndexes. */ export interface GoogleCloudAiplatformV1IndexEndpoint { /** * Output only. Timestamp when this IndexEndpoint was created. */ readonly createTime?: Date; /** * Output only. The indexes deployed in this endpoint. */ readonly deployedIndexes?: GoogleCloudAiplatformV1DeployedIndex[]; /** * The description of the IndexEndpoint. */ description?: string; /** * Required. The display name of the IndexEndpoint. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Deprecated: If true, expose the IndexEndpoint via private * service connect. Only one of the fields, network or * enable_private_service_connect, can be set. */ enablePrivateServiceConnect?: boolean; /** * Immutable. Customer-managed encryption key spec for an IndexEndpoint. If * set, this IndexEndpoint and all sub-resources of this IndexEndpoint will be * secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your IndexEndpoints. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. The resource name of the IndexEndpoint. */ readonly name?: string; /** * Optional. The full name of the Google Compute Engine * [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) * to which the IndexEndpoint should be peered. Private services access must * already be configured for the network. If left unspecified, the Endpoint is * not peered with any network. network and private_service_connect_config are * mutually exclusive. 
* [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in '12345', and {network} is network name. */ network?: string; /** * Optional. Configuration for private service connect. network and * private_service_connect_config are mutually exclusive. */ privateServiceConnectConfig?: GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** * Output only. If public_endpoint_enabled is true, this field will be * populated with the domain name to use for this index endpoint. */ readonly publicEndpointDomainName?: string; /** * Optional. If true, the deployed index will be accessible through public * endpoint. */ publicEndpointEnabled?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this IndexEndpoint was last updated. This * timestamp is not updated when the endpoint's DeployedIndexes are updated, * e.g. due to updates of the original Indexes they are the deployments of. */ readonly updateTime?: Date; } /** * IndexPrivateEndpoints proto is used to provide paths for users to send * requests via private endpoints (e.g. private service access, private service * connect). To send request via private service access, use match_grpc_address. * To send request via private service connect, use service_attachment. */ export interface GoogleCloudAiplatformV1IndexPrivateEndpoints { /** * Output only. The ip address used to send match gRPC requests. */ readonly matchGrpcAddress?: string; /** * Output only. PscAutomatedEndpoints is populated if private service connect * is enabled if PscAutomatedConfig is set. */ readonly pscAutomatedEndpoints?: GoogleCloudAiplatformV1PscAutomatedEndpoints[]; /** * Output only. The name of the service attachment resource. Populated if * private service connect is enabled. */ readonly serviceAttachment?: string; } /** * Stats of the Index. */ export interface GoogleCloudAiplatformV1IndexStats { /** * Output only. The number of shards in the Index. */ readonly shardsCount?: number; /** * Output only. The number of sparse vectors in the Index. */ readonly sparseVectorsCount?: bigint; /** * Output only. The number of dense vectors in the Index. */ readonly vectorsCount?: bigint; } /** * Specifies Vertex AI owned input data to be used for training, and possibly * evaluating, the Model. */ export interface GoogleCloudAiplatformV1InputDataConfig { /** * Applicable only to custom training with Datasets that have DataItems and * Annotations. Cloud Storage URI that points to a YAML file describing the * annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * The schema files that can be used here are found in * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the * chosen schema must be consistent with metadata of the Dataset specified by * dataset_id. Only Annotations that both match this schema and belong to * DataItems not ignored by the split method are used in respectively * training, validation or test role, depending on the role of the DataItem * they are on. When used in conjunction with annotations_filter, the * Annotations used for training are filtered by both annotations_filter and * annotation_schema_uri. 
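 *
 * A sketch of an InputDataConfig that combines this field with a fraction
 * split (the dataset ID, schema file name and output prefix are
 * placeholders, and the FractionSplit/GcsDestination field names are
 * assumed from the rest of this module):
 *
 * ```ts
 * const inputDataConfig: GoogleCloudAiplatformV1InputDataConfig = {
 *   datasetId: "1234567890",
 *   annotationSchemaUri:
 *     "gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml",
 *   fractionSplit: {
 *     trainingFraction: 0.8,
 *     validationFraction: 0.1,
 *     testFraction: 0.1,
 *   },
 *   gcsDestination: { outputUriPrefix: "gs://my-bucket/training-output/" },
 * };
 * ```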
*/ annotationSchemaUri?: string; /** * Applicable only to Datasets that have DataItems and Annotations. A filter * on Annotations of the Dataset. Only Annotations that both match this filter * and belong to DataItems not ignored by the split method are used in * respectively training, validation or test role, depending on the role of * the DataItem they are on (for the auto-assigned that role is decided by * Vertex AI). A filter with same syntax as the one used in ListAnnotations * may be used, but note here it filters across all Annotations of the * Dataset, and not just within a single DataItem. */ annotationsFilter?: string; /** * Only applicable to custom training with tabular Dataset with BigQuery * source. The BigQuery project location where the training data is to be * written to. In the given project a new dataset is created with name * `dataset___` where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All * training input data is written into that dataset. In the dataset three * tables are created, `training`, `validation` and `test`. * AIP_DATA_FORMAT * = "bigquery". * AIP_TRAINING_DATA_URI = * "bigquery_destination.dataset___.training" * AIP_VALIDATION_DATA_URI = * "bigquery_destination.dataset___.validation" * AIP_TEST_DATA_URI = * "bigquery_destination.dataset___.test" */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * Required. The ID of the Dataset in the same Project and Location which * data will be used to train the Model. The Dataset must use schema * compatible with Model being trained, and what is compatible should be * described in the used TrainingPipeline's training_task_definition. For * tabular Datasets, all their data is exported to training, to pick and * choose from. */ datasetId?: string; /** * Split based on the provided filters for each set. */ filterSplit?: GoogleCloudAiplatformV1FilterSplit; /** * Split based on fractions defining the size of each set. */ fractionSplit?: GoogleCloudAiplatformV1FractionSplit; /** * The Cloud Storage location where the training data is to be written to. In * the given directory a new directory is created with name: `dataset---` * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All * training input data is written into that directory. The Vertex AI * environment variables representing Cloud Storage data URIs are represented * in the Cloud Storage wildcard format to support sharded data. e.g.: * "gs://.../training-*.jsonl" * AIP_DATA_FORMAT = "jsonl" for non-tabular * data, "csv" for tabular data * AIP_TRAINING_DATA_URI = * "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" * * AIP_VALIDATION_DATA_URI = * "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" * * AIP_TEST_DATA_URI = "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}" */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; /** * Whether to persist the ML use assignment to data item system labels. */ persistMlUseAssignment?: boolean; /** * Supported only for tabular Datasets. Split based on a predefined key. */ predefinedSplit?: GoogleCloudAiplatformV1PredefinedSplit; /** * Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery * (annotation set) under the Dataset specified by dataset_id used for * filtering Annotations for training. Only Annotations that are associated * with this SavedQuery are used in respectively training. When used in * conjunction with annotations_filter, the Annotations used for training are * filtered by both saved_query_id and annotations_filter. 
Only one of * saved_query_id and annotation_schema_uri should be specified as both of * them represent the same thing: problem type. */ savedQueryId?: string; /** * Supported only for tabular Datasets. Split based on the distribution of * the specified column. */ stratifiedSplit?: GoogleCloudAiplatformV1StratifiedSplit; /** * Supported only for tabular Datasets. Split based on the timestamp of the * input data pieces. */ timestampSplit?: GoogleCloudAiplatformV1TimestampSplit; } /** * A list of int64 values. */ export interface GoogleCloudAiplatformV1Int64Array { /** * A list of int64 values. */ values?: bigint[]; } function serializeGoogleCloudAiplatformV1Int64Array(data: any): GoogleCloudAiplatformV1Int64Array { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Int64Array(data: any): GoogleCloudAiplatformV1Int64Array { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (BigInt(item))) : undefined, }; } /** * An attribution method that computes the Aumann-Shapley value taking * advantage of the model's fully differentiable structure. Refer to this paper * for more details: https://arxiv.org/abs/1703.01365 */ export interface GoogleCloudAiplatformV1IntegratedGradientsAttribution { /** * Config for IG with blur baseline. When enabled, a linear path from the * maximally blurred image to the input image is created. Using a blurred * baseline instead of zero (black image) is motivated by the BlurIG approach * explained here: https://arxiv.org/abs/2004.03383 */ blurBaselineConfig?: GoogleCloudAiplatformV1BlurBaselineConfig; /** * Config for SmoothGrad approximation of gradients. When enabled, the * gradients are approximated by averaging the gradients from noisy samples in * the vicinity of the inputs. Adding noise can help improve the computed * gradients. Refer to this paper for more details: * https://arxiv.org/pdf/1706.03825.pdf */ smoothGradConfig?: GoogleCloudAiplatformV1SmoothGradConfig; /** * Required. The number of steps for approximating the path integral. A good * value to start is 50 and gradually increase until the sum to diff property * is within the desired error range. Valid range of its value is [1, 100], * inclusively. */ stepCount?: number; } /** * The Jira source for the ImportRagFilesRequest. */ export interface GoogleCloudAiplatformV1JiraSource { /** * Required. The Jira queries. */ jiraQueries?: GoogleCloudAiplatformV1JiraSourceJiraQueries[]; } /** * JiraQueries contains the Jira queries and corresponding authentication. */ export interface GoogleCloudAiplatformV1JiraSourceJiraQueries { /** * Required. The SecretManager secret version resource name (e.g. * projects/{project}/secrets/{secret}/versions/{version}) storing the Jira * API key. See [Manage API tokens for your Atlassian * account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). */ apiKeyConfig?: GoogleCloudAiplatformV1ApiAuthApiKeyConfig; /** * A list of custom Jira queries to import. For information about JQL (Jira * Query Language), see * https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ */ customQueries?: string[]; /** * Required. The Jira email address. */ email?: string; /** * A list of Jira projects to import in their entirety. */ projects?: string[]; /** * Required. The Jira server URI. 
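 *
 * A sketch of a complete JiraSource (server, e-mail, project and secret
 * names are placeholders; `apiKeySecretVersion` is assumed to be the field
 * on GoogleCloudAiplatformV1ApiAuthApiKeyConfig):
 *
 * ```ts
 * const jiraSource: GoogleCloudAiplatformV1JiraSource = {
 *   jiraQueries: [{
 *     serverUri: "https://example.atlassian.net",
 *     email: "integration-bot@example.com",
 *     projects: ["DOCS"],
 *     apiKeyConfig: {
 *       apiKeySecretVersion: "projects/my-project/secrets/jira-key/versions/1",
 *     },
 *   }],
 * };
 * ```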
*/ serverUri?: string; } /** * Contains information about the Large Model. */ export interface GoogleCloudAiplatformV1LargeModelReference { /** * Required. The unique name of the large Foundation or pre-built model. Like * "chat-bison", "text-bison". Or model name with version ID, like * "chat-bison@001", "text-bison@005", etc. */ name?: string; } /** * A subgraph of the overall lineage graph. Event edges connect Artifact and * Execution nodes. */ export interface GoogleCloudAiplatformV1LineageSubgraph { /** * The Artifact nodes in the subgraph. */ artifacts?: GoogleCloudAiplatformV1Artifact[]; /** * The Event edges between Artifacts and Executions in the subgraph. */ events?: GoogleCloudAiplatformV1Event[]; /** * The Execution nodes in the subgraph. */ executions?: GoogleCloudAiplatformV1Execution[]; } /** * Response message for DatasetService.ListAnnotations. */ export interface GoogleCloudAiplatformV1ListAnnotationsResponse { /** * A list of Annotations that matches the specified filter in the request. */ annotations?: GoogleCloudAiplatformV1Annotation[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for MetadataService.ListArtifacts. */ export interface GoogleCloudAiplatformV1ListArtifactsResponse { /** * The Artifacts retrieved from the MetadataStore. */ artifacts?: GoogleCloudAiplatformV1Artifact[]; /** * A token, which can be sent as ListArtifactsRequest.page_token to retrieve * the next page. If this field is not populated, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for JobService.ListBatchPredictionJobs */ export interface GoogleCloudAiplatformV1ListBatchPredictionJobsResponse { /** * List of BatchPredictionJobs in the requested page. */ batchPredictionJobs?: GoogleCloudAiplatformV1BatchPredictionJob[]; /** * A token to retrieve the next page of results. Pass to * ListBatchPredictionJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data: any): GoogleCloudAiplatformV1ListBatchPredictionJobsResponse { return { ...data, batchPredictionJobs: data["batchPredictionJobs"] !== undefined ? data["batchPredictionJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1BatchPredictionJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data: any): GoogleCloudAiplatformV1ListBatchPredictionJobsResponse { return { ...data, batchPredictionJobs: data["batchPredictionJobs"] !== undefined ? data["batchPredictionJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1BatchPredictionJob(item))) : undefined, }; } /** * Response with a list of CachedContents. */ export interface GoogleCloudAiplatformV1ListCachedContentsResponse { /** * List of cached contents. */ cachedContents?: GoogleCloudAiplatformV1CachedContent[]; /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListCachedContentsResponse(data: any): GoogleCloudAiplatformV1ListCachedContentsResponse { return { ...data, cachedContents: data["cachedContents"] !== undefined ? 
data["cachedContents"].map((item: any) => (serializeGoogleCloudAiplatformV1CachedContent(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListCachedContentsResponse(data: any): GoogleCloudAiplatformV1ListCachedContentsResponse { return { ...data, cachedContents: data["cachedContents"] !== undefined ? data["cachedContents"].map((item: any) => (deserializeGoogleCloudAiplatformV1CachedContent(item))) : undefined, }; } /** * Response message for MetadataService.ListContexts. */ export interface GoogleCloudAiplatformV1ListContextsResponse { /** * The Contexts retrieved from the MetadataStore. */ contexts?: GoogleCloudAiplatformV1Context[]; /** * A token, which can be sent as ListContextsRequest.page_token to retrieve * the next page. If this field is not populated, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for JobService.ListCustomJobs */ export interface GoogleCloudAiplatformV1ListCustomJobsResponse { /** * List of CustomJobs in the requested page. */ customJobs?: GoogleCloudAiplatformV1CustomJob[]; /** * A token to retrieve the next page of results. Pass to * ListCustomJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListCustomJobsResponse(data: any): GoogleCloudAiplatformV1ListCustomJobsResponse { return { ...data, customJobs: data["customJobs"] !== undefined ? data["customJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1CustomJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListCustomJobsResponse(data: any): GoogleCloudAiplatformV1ListCustomJobsResponse { return { ...data, customJobs: data["customJobs"] !== undefined ? data["customJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1CustomJob(item))) : undefined, }; } /** * Response message for DatasetService.ListDataItems. */ export interface GoogleCloudAiplatformV1ListDataItemsResponse { /** * A list of DataItems that matches the specified filter in the request. */ dataItems?: GoogleCloudAiplatformV1DataItem[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for JobService.ListDataLabelingJobs. */ export interface GoogleCloudAiplatformV1ListDataLabelingJobsResponse { /** * A list of DataLabelingJobs that matches the specified filter in the * request. */ dataLabelingJobs?: GoogleCloudAiplatformV1DataLabelingJob[]; /** * The standard List next-page token. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListDataLabelingJobsResponse(data: any): GoogleCloudAiplatformV1ListDataLabelingJobsResponse { return { ...data, dataLabelingJobs: data["dataLabelingJobs"] !== undefined ? data["dataLabelingJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1DataLabelingJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListDataLabelingJobsResponse(data: any): GoogleCloudAiplatformV1ListDataLabelingJobsResponse { return { ...data, dataLabelingJobs: data["dataLabelingJobs"] !== undefined ? data["dataLabelingJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1DataLabelingJob(item))) : undefined, }; } /** * Response message for DatasetService.ListDatasets. */ export interface GoogleCloudAiplatformV1ListDatasetsResponse { /** * A list of Datasets that matches the specified filter in the request. */ datasets?: GoogleCloudAiplatformV1Dataset[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for DatasetService.ListDatasetVersions. 
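 *
 * Like the other List responses in this module, this one is paginated via
 * `nextPageToken`. A minimal drain-all-pages sketch, assuming a hypothetical
 * `listPage` helper that resolves to this response shape:
 *
 * ```ts
 * declare function listPage(pageToken?: string): Promise<GoogleCloudAiplatformV1ListDatasetVersionsResponse>;
 *
 * const versions: GoogleCloudAiplatformV1DatasetVersion[] = [];
 * let pageToken: string | undefined = undefined;
 * do {
 *   const page = await listPage(pageToken);
 *   versions.push(...(page.datasetVersions ?? []));
 *   pageToken = page.nextPageToken || undefined;
 * } while (pageToken);
 * ```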
*/ export interface GoogleCloudAiplatformV1ListDatasetVersionsResponse { /** * A list of DatasetVersions that matches the specified filter in the * request. */ datasetVersions?: GoogleCloudAiplatformV1DatasetVersion[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for ListDeploymentResourcePools method. */ export interface GoogleCloudAiplatformV1ListDeploymentResourcePoolsResponse { /** * The DeploymentResourcePools from the specified location. */ deploymentResourcePools?: GoogleCloudAiplatformV1DeploymentResourcePool[]; /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } /** * Response message for EndpointService.ListEndpoints. */ export interface GoogleCloudAiplatformV1ListEndpointsResponse { /** * List of Endpoints in the requested page. */ endpoints?: GoogleCloudAiplatformV1Endpoint[]; /** * A token to retrieve the next page of results. Pass to * ListEndpointsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListEndpointsResponse(data: any): GoogleCloudAiplatformV1ListEndpointsResponse { return { ...data, endpoints: data["endpoints"] !== undefined ? data["endpoints"].map((item: any) => (serializeGoogleCloudAiplatformV1Endpoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListEndpointsResponse(data: any): GoogleCloudAiplatformV1ListEndpointsResponse { return { ...data, endpoints: data["endpoints"] !== undefined ? data["endpoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1Endpoint(item))) : undefined, }; } /** * Response message for FeaturestoreService.ListEntityTypes. */ export interface GoogleCloudAiplatformV1ListEntityTypesResponse { /** * The EntityTypes matching the request. */ entityTypes?: GoogleCloudAiplatformV1EntityType[]; /** * A token, which can be sent as ListEntityTypesRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for MetadataService.ListExecutions. */ export interface GoogleCloudAiplatformV1ListExecutionsResponse { /** * The Executions retrieved from the MetadataStore. */ executions?: GoogleCloudAiplatformV1Execution[]; /** * A token, which can be sent as ListExecutionsRequest.page_token to retrieve * the next page. If this field is not populated, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeatureRegistryService.ListFeatureGroups. */ export interface GoogleCloudAiplatformV1ListFeatureGroupsResponse { /** * The FeatureGroups matching the request. */ featureGroups?: GoogleCloudAiplatformV1FeatureGroup[]; /** * A token, which can be sent as ListFeatureGroupsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeatureOnlineStoreAdminService.ListFeatureOnlineStores. */ export interface GoogleCloudAiplatformV1ListFeatureOnlineStoresResponse { /** * The FeatureOnlineStores matching the request. */ featureOnlineStores?: GoogleCloudAiplatformV1FeatureOnlineStore[]; /** * A token, which can be sent as ListFeatureOnlineStoresRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeaturestoreService.ListFeatures. 
Response message for * FeatureRegistryService.ListFeatures. */ export interface GoogleCloudAiplatformV1ListFeaturesResponse { /** * The Features matching the request. */ features?: GoogleCloudAiplatformV1Feature[]; /** * A token, which can be sent as ListFeaturesRequest.page_token to retrieve * the next page. If this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } /** * Response message for FeaturestoreService.ListFeaturestores. */ export interface GoogleCloudAiplatformV1ListFeaturestoresResponse { /** * The Featurestores matching the request. */ featurestores?: GoogleCloudAiplatformV1Featurestore[]; /** * A token, which can be sent as ListFeaturestoresRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeatureOnlineStoreAdminService.ListFeatureViews. */ export interface GoogleCloudAiplatformV1ListFeatureViewsResponse { /** * The FeatureViews matching the request. */ featureViews?: GoogleCloudAiplatformV1FeatureView[]; /** * A token, which can be sent as ListFeatureViewsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListFeatureViewsResponse(data: any): GoogleCloudAiplatformV1ListFeatureViewsResponse { return { ...data, featureViews: data["featureViews"] !== undefined ? data["featureViews"].map((item: any) => (serializeGoogleCloudAiplatformV1FeatureView(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListFeatureViewsResponse(data: any): GoogleCloudAiplatformV1ListFeatureViewsResponse { return { ...data, featureViews: data["featureViews"] !== undefined ? data["featureViews"].map((item: any) => (deserializeGoogleCloudAiplatformV1FeatureView(item))) : undefined, }; } /** * Response message for FeatureOnlineStoreAdminService.ListFeatureViewSyncs. */ export interface GoogleCloudAiplatformV1ListFeatureViewSyncsResponse { /** * The FeatureViewSyncs matching the request. */ featureViewSyncs?: GoogleCloudAiplatformV1FeatureViewSync[]; /** * A token, which can be sent as ListFeatureViewSyncsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for JobService.ListHyperparameterTuningJobs */ export interface GoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse { /** * List of HyperparameterTuningJobs in the requested page. * HyperparameterTuningJob.trials of the jobs will be not be returned. */ hyperparameterTuningJobs?: GoogleCloudAiplatformV1HyperparameterTuningJob[]; /** * A token to retrieve the next page of results. Pass to * ListHyperparameterTuningJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse { return { ...data, hyperparameterTuningJobs: data["hyperparameterTuningJobs"] !== undefined ? data["hyperparameterTuningJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1HyperparameterTuningJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse { return { ...data, hyperparameterTuningJobs: data["hyperparameterTuningJobs"] !== undefined ? 
data["hyperparameterTuningJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(item))) : undefined, }; } /** * Response message for IndexEndpointService.ListIndexEndpoints. */ export interface GoogleCloudAiplatformV1ListIndexEndpointsResponse { /** * List of IndexEndpoints in the requested page. */ indexEndpoints?: GoogleCloudAiplatformV1IndexEndpoint[]; /** * A token to retrieve next page of results. Pass to * ListIndexEndpointsRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for IndexService.ListIndexes. */ export interface GoogleCloudAiplatformV1ListIndexesResponse { /** * List of indexes in the requested page. */ indexes?: GoogleCloudAiplatformV1Index[]; /** * A token to retrieve next page of results. Pass to * ListIndexesRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for MetadataService.ListMetadataSchemas. */ export interface GoogleCloudAiplatformV1ListMetadataSchemasResponse { /** * The MetadataSchemas found for the MetadataStore. */ metadataSchemas?: GoogleCloudAiplatformV1MetadataSchema[]; /** * A token, which can be sent as ListMetadataSchemasRequest.page_token to * retrieve the next page. If this field is not populated, there are no * subsequent pages. */ nextPageToken?: string; } /** * Response message for MetadataService.ListMetadataStores. */ export interface GoogleCloudAiplatformV1ListMetadataStoresResponse { /** * The MetadataStores found for the Location. */ metadataStores?: GoogleCloudAiplatformV1MetadataStore[]; /** * A token, which can be sent as ListMetadataStoresRequest.page_token to * retrieve the next page. If this field is not populated, there are no * subsequent pages. */ nextPageToken?: string; } /** * Response message for JobService.ListModelDeploymentMonitoringJobs. */ export interface GoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse { /** * A list of ModelDeploymentMonitoringJobs that matches the specified filter * in the request. */ modelDeploymentMonitoringJobs?: GoogleCloudAiplatformV1ModelDeploymentMonitoringJob[]; /** * The standard List next-page token. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse(data: any): GoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse { return { ...data, modelDeploymentMonitoringJobs: data["modelDeploymentMonitoringJobs"] !== undefined ? data["modelDeploymentMonitoringJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse(data: any): GoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse { return { ...data, modelDeploymentMonitoringJobs: data["modelDeploymentMonitoringJobs"] !== undefined ? data["modelDeploymentMonitoringJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(item))) : undefined, }; } /** * Response message for ModelService.ListModelEvaluationSlices. */ export interface GoogleCloudAiplatformV1ListModelEvaluationSlicesResponse { /** * List of ModelEvaluations in the requested page. */ modelEvaluationSlices?: GoogleCloudAiplatformV1ModelEvaluationSlice[]; /** * A token to retrieve next page of results. Pass to * ListModelEvaluationSlicesRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for ModelService.ListModelEvaluations. 
*/ export interface GoogleCloudAiplatformV1ListModelEvaluationsResponse { /** * List of ModelEvaluations in the requested page. */ modelEvaluations?: GoogleCloudAiplatformV1ModelEvaluation[]; /** * A token to retrieve next page of results. Pass to * ListModelEvaluationsRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for ModelService.ListModels */ export interface GoogleCloudAiplatformV1ListModelsResponse { /** * List of Models in the requested page. */ models?: GoogleCloudAiplatformV1Model[]; /** * A token to retrieve next page of results. Pass to * ListModelsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelsResponse(data: any): GoogleCloudAiplatformV1ListModelsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (serializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelsResponse(data: any): GoogleCloudAiplatformV1ListModelsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (deserializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } /** * Response message for ModelService.ListModelVersions */ export interface GoogleCloudAiplatformV1ListModelVersionsResponse { /** * List of Model versions in the requested page. In the returned Model name * field, version ID instead of regvision tag will be included. */ models?: GoogleCloudAiplatformV1Model[]; /** * A token to retrieve the next page of results. Pass to * ListModelVersionsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelVersionsResponse(data: any): GoogleCloudAiplatformV1ListModelVersionsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (serializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelVersionsResponse(data: any): GoogleCloudAiplatformV1ListModelVersionsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (deserializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } /** * Response message for JobService.ListNasJobs */ export interface GoogleCloudAiplatformV1ListNasJobsResponse { /** * List of NasJobs in the requested page. NasJob.nas_job_output of the jobs * will not be returned. */ nasJobs?: GoogleCloudAiplatformV1NasJob[]; /** * A token to retrieve the next page of results. Pass to * ListNasJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListNasJobsResponse(data: any): GoogleCloudAiplatformV1ListNasJobsResponse { return { ...data, nasJobs: data["nasJobs"] !== undefined ? data["nasJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1NasJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListNasJobsResponse(data: any): GoogleCloudAiplatformV1ListNasJobsResponse { return { ...data, nasJobs: data["nasJobs"] !== undefined ? data["nasJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1NasJob(item))) : undefined, }; } /** * Response message for JobService.ListNasTrialDetails */ export interface GoogleCloudAiplatformV1ListNasTrialDetailsResponse { /** * List of top NasTrials in the requested page. */ nasTrialDetails?: GoogleCloudAiplatformV1NasTrialDetail[]; /** * A token to retrieve the next page of results. 
Pass to * ListNasTrialDetailsRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for [NotebookService.CreateNotebookExecutionJob] */ export interface GoogleCloudAiplatformV1ListNotebookExecutionJobsResponse { /** * A token to retrieve next page of results. Pass to * ListNotebookExecutionJobsRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of NotebookExecutionJobs in the requested page. */ notebookExecutionJobs?: GoogleCloudAiplatformV1NotebookExecutionJob[]; } function serializeGoogleCloudAiplatformV1ListNotebookExecutionJobsResponse(data: any): GoogleCloudAiplatformV1ListNotebookExecutionJobsResponse { return { ...data, notebookExecutionJobs: data["notebookExecutionJobs"] !== undefined ? data["notebookExecutionJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1NotebookExecutionJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListNotebookExecutionJobsResponse(data: any): GoogleCloudAiplatformV1ListNotebookExecutionJobsResponse { return { ...data, notebookExecutionJobs: data["notebookExecutionJobs"] !== undefined ? data["notebookExecutionJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1NotebookExecutionJob(item))) : undefined, }; } /** * Response message for NotebookService.ListNotebookRuntimes. */ export interface GoogleCloudAiplatformV1ListNotebookRuntimesResponse { /** * A token to retrieve next page of results. Pass to * ListNotebookRuntimesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of NotebookRuntimes in the requested page. */ notebookRuntimes?: GoogleCloudAiplatformV1NotebookRuntime[]; } /** * Response message for NotebookService.ListNotebookRuntimeTemplates. */ export interface GoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse { /** * A token to retrieve next page of results. Pass to * ListNotebookRuntimeTemplatesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of NotebookRuntimeTemplates in the requested page. */ notebookRuntimeTemplates?: GoogleCloudAiplatformV1NotebookRuntimeTemplate[]; } function serializeGoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse(data: any): GoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse { return { ...data, notebookRuntimeTemplates: data["notebookRuntimeTemplates"] !== undefined ? data["notebookRuntimeTemplates"].map((item: any) => (serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse(data: any): GoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse { return { ...data, notebookRuntimeTemplates: data["notebookRuntimeTemplates"] !== undefined ? data["notebookRuntimeTemplates"].map((item: any) => (deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(item))) : undefined, }; } /** * Request message for VizierService.ListOptimalTrials. */ export interface GoogleCloudAiplatformV1ListOptimalTrialsRequest { } /** * Response message for VizierService.ListOptimalTrials. */ export interface GoogleCloudAiplatformV1ListOptimalTrialsResponse { /** * The pareto-optimal Trials for multiple objective Study or the optimal * trial for single objective Study. The definition of pareto-optimal can be * checked in wiki page. 
https://en.wikipedia.org/wiki/Pareto_efficiency */ optimalTrials?: GoogleCloudAiplatformV1Trial[]; } /** * Response message for PersistentResourceService.ListPersistentResources */ export interface GoogleCloudAiplatformV1ListPersistentResourcesResponse { /** * A token to retrieve next page of results. Pass to * ListPersistentResourcesRequest.page_token to obtain that page. */ nextPageToken?: string; persistentResources?: GoogleCloudAiplatformV1PersistentResource[]; } function serializeGoogleCloudAiplatformV1ListPersistentResourcesResponse(data: any): GoogleCloudAiplatformV1ListPersistentResourcesResponse { return { ...data, persistentResources: data["persistentResources"] !== undefined ? data["persistentResources"].map((item: any) => (serializeGoogleCloudAiplatformV1PersistentResource(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListPersistentResourcesResponse(data: any): GoogleCloudAiplatformV1ListPersistentResourcesResponse { return { ...data, persistentResources: data["persistentResources"] !== undefined ? data["persistentResources"].map((item: any) => (deserializeGoogleCloudAiplatformV1PersistentResource(item))) : undefined, }; } /** * Response message for PipelineService.ListPipelineJobs */ export interface GoogleCloudAiplatformV1ListPipelineJobsResponse { /** * A token to retrieve the next page of results. Pass to * ListPipelineJobsRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of PipelineJobs in the requested page. */ pipelineJobs?: GoogleCloudAiplatformV1PipelineJob[]; } function serializeGoogleCloudAiplatformV1ListPipelineJobsResponse(data: any): GoogleCloudAiplatformV1ListPipelineJobsResponse { return { ...data, pipelineJobs: data["pipelineJobs"] !== undefined ? data["pipelineJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1PipelineJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListPipelineJobsResponse(data: any): GoogleCloudAiplatformV1ListPipelineJobsResponse { return { ...data, pipelineJobs: data["pipelineJobs"] !== undefined ? data["pipelineJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1PipelineJob(item))) : undefined, }; } /** * Response message for VertexRagDataService.ListRagCorpora. */ export interface GoogleCloudAiplatformV1ListRagCorporaResponse { /** * A token to retrieve the next page of results. Pass to * ListRagCorporaRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of RagCorpora in the requested page. */ ragCorpora?: GoogleCloudAiplatformV1RagCorpus[]; } /** * Response message for VertexRagDataService.ListRagFiles. */ export interface GoogleCloudAiplatformV1ListRagFilesResponse { /** * A token to retrieve the next page of results. Pass to * ListRagFilesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of RagFiles in the requested page. */ ragFiles?: GoogleCloudAiplatformV1RagFile[]; } function serializeGoogleCloudAiplatformV1ListRagFilesResponse(data: any): GoogleCloudAiplatformV1ListRagFilesResponse { return { ...data, ragFiles: data["ragFiles"] !== undefined ? data["ragFiles"].map((item: any) => (serializeGoogleCloudAiplatformV1RagFile(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListRagFilesResponse(data: any): GoogleCloudAiplatformV1ListRagFilesResponse { return { ...data, ragFiles: data["ragFiles"] !== undefined ? 
data["ragFiles"].map((item: any) => (deserializeGoogleCloudAiplatformV1RagFile(item))) : undefined, }; } /** * Response message for ReasoningEngineService.ListReasoningEngines */ export interface GoogleCloudAiplatformV1ListReasoningEnginesResponse { /** * A token to retrieve the next page of results. Pass to * ListReasoningEnginesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of ReasoningEngines in the requested page. */ reasoningEngines?: GoogleCloudAiplatformV1ReasoningEngine[]; } /** * Response message for DatasetService.ListSavedQueries. */ export interface GoogleCloudAiplatformV1ListSavedQueriesResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of SavedQueries that match the specified filter in the request. */ savedQueries?: GoogleCloudAiplatformV1SavedQuery[]; } /** * Response message for ScheduleService.ListSchedules */ export interface GoogleCloudAiplatformV1ListSchedulesResponse { /** * A token to retrieve the next page of results. Pass to * ListSchedulesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of Schedules in the requested page. */ schedules?: GoogleCloudAiplatformV1Schedule[]; } function serializeGoogleCloudAiplatformV1ListSchedulesResponse(data: any): GoogleCloudAiplatformV1ListSchedulesResponse { return { ...data, schedules: data["schedules"] !== undefined ? data["schedules"].map((item: any) => (serializeGoogleCloudAiplatformV1Schedule(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListSchedulesResponse(data: any): GoogleCloudAiplatformV1ListSchedulesResponse { return { ...data, schedules: data["schedules"] !== undefined ? data["schedules"].map((item: any) => (deserializeGoogleCloudAiplatformV1Schedule(item))) : undefined, }; } /** * Response message for SpecialistPoolService.ListSpecialistPools. */ export interface GoogleCloudAiplatformV1ListSpecialistPoolsResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of SpecialistPools that matches the specified filter in the * request. */ specialistPools?: GoogleCloudAiplatformV1SpecialistPool[]; } /** * Response message for VizierService.ListStudies. */ export interface GoogleCloudAiplatformV1ListStudiesResponse { /** * Passes this token as the `page_token` field of the request for a * subsequent call. If this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * The studies associated with the project. */ studies?: GoogleCloudAiplatformV1Study[]; } function serializeGoogleCloudAiplatformV1ListStudiesResponse(data: any): GoogleCloudAiplatformV1ListStudiesResponse { return { ...data, studies: data["studies"] !== undefined ? data["studies"].map((item: any) => (serializeGoogleCloudAiplatformV1Study(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListStudiesResponse(data: any): GoogleCloudAiplatformV1ListStudiesResponse { return { ...data, studies: data["studies"] !== undefined ? data["studies"].map((item: any) => (deserializeGoogleCloudAiplatformV1Study(item))) : undefined, }; } /** * Response message for TensorboardService.ListTensorboardExperiments. */ export interface GoogleCloudAiplatformV1ListTensorboardExperimentsResponse { /** * A token, which can be sent as ListTensorboardExperimentsRequest.page_token * to retrieve the next page. If this field is omitted, there are no * subsequent pages. */ nextPageToken?: string; /** * The TensorboardExperiments matching the request.
*/ tensorboardExperiments?: GoogleCloudAiplatformV1TensorboardExperiment[]; } /** * Response message for TensorboardService.ListTensorboardRuns. */ export interface GoogleCloudAiplatformV1ListTensorboardRunsResponse { /** * A token, which can be sent as ListTensorboardRunsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; /** * The TensorboardRuns mathching the request. */ tensorboardRuns?: GoogleCloudAiplatformV1TensorboardRun[]; } /** * Response message for TensorboardService.ListTensorboards. */ export interface GoogleCloudAiplatformV1ListTensorboardsResponse { /** * A token, which can be sent as ListTensorboardsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; /** * The Tensorboards mathching the request. */ tensorboards?: GoogleCloudAiplatformV1Tensorboard[]; } /** * Response message for TensorboardService.ListTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse { /** * A token, which can be sent as ListTensorboardTimeSeriesRequest.page_token * to retrieve the next page. If this field is omitted, there are no * subsequent pages. */ nextPageToken?: string; /** * The TensorboardTimeSeries mathching the request. */ tensorboardTimeSeries?: GoogleCloudAiplatformV1TensorboardTimeSeries[]; } function serializeGoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (serializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } /** * Response message for PipelineService.ListTrainingPipelines */ export interface GoogleCloudAiplatformV1ListTrainingPipelinesResponse { /** * A token to retrieve the next page of results. Pass to * ListTrainingPipelinesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of TrainingPipelines in the requested page. */ trainingPipelines?: GoogleCloudAiplatformV1TrainingPipeline[]; } function serializeGoogleCloudAiplatformV1ListTrainingPipelinesResponse(data: any): GoogleCloudAiplatformV1ListTrainingPipelinesResponse { return { ...data, trainingPipelines: data["trainingPipelines"] !== undefined ? data["trainingPipelines"].map((item: any) => (serializeGoogleCloudAiplatformV1TrainingPipeline(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListTrainingPipelinesResponse(data: any): GoogleCloudAiplatformV1ListTrainingPipelinesResponse { return { ...data, trainingPipelines: data["trainingPipelines"] !== undefined ? data["trainingPipelines"].map((item: any) => (deserializeGoogleCloudAiplatformV1TrainingPipeline(item))) : undefined, }; } /** * Response message for VizierService.ListTrials. */ export interface GoogleCloudAiplatformV1ListTrialsResponse { /** * Pass this token as the `page_token` field of the request for a subsequent * call. If this field is omitted, there are no subsequent pages. 
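 * A minimal paging sketch over this response type (the `listTrials` helper
 * below is hypothetical and simply stands in for whichever generated list
 * method produces a GoogleCloudAiplatformV1ListTrialsResponse; only the
 * page-token handling is the point):
 * ```ts
 * declare function listTrials(
 *   opts: { pageToken?: string },
 * ): Promise<GoogleCloudAiplatformV1ListTrialsResponse>; // hypothetical wrapper
 *
 * async function listAllTrials(): Promise<GoogleCloudAiplatformV1Trial[]> {
 *   const trials: GoogleCloudAiplatformV1Trial[] = [];
 *   let pageToken: string | undefined;
 *   do {
 *     const resp = await listTrials({ pageToken });
 *     trials.push(...(resp.trials ?? []));
 *     pageToken = resp.nextPageToken;
 *   } while (pageToken);
 *   return trials;
 * }
 * ```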
*/ nextPageToken?: string; /** * The Trials associated with the Study. */ trials?: GoogleCloudAiplatformV1Trial[]; } /** * Response message for GenAiTuningService.ListTuningJobs */ export interface GoogleCloudAiplatformV1ListTuningJobsResponse { /** * A token to retrieve the next page of results. Pass to * ListTuningJobsRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of TuningJobs in the requested page. */ tuningJobs?: GoogleCloudAiplatformV1TuningJob[]; } function serializeGoogleCloudAiplatformV1ListTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListTuningJobsResponse { return { ...data, tuningJobs: data["tuningJobs"] !== undefined ? data["tuningJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1TuningJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListTuningJobsResponse { return { ...data, tuningJobs: data["tuningJobs"] !== undefined ? data["tuningJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1TuningJob(item))) : undefined, }; } /** * Logprobs Result */ export interface GoogleCloudAiplatformV1LogprobsResult { /** * Length = total number of decoding steps. The chosen candidates may or may * not be in top_candidates. */ chosenCandidates?: GoogleCloudAiplatformV1LogprobsResultCandidate[]; /** * Length = total number of decoding steps. */ topCandidates?: GoogleCloudAiplatformV1LogprobsResultTopCandidates[]; } /** * Candidate for the logprobs token and score. */ export interface GoogleCloudAiplatformV1LogprobsResultCandidate { /** * The candidate's log probability. */ logProbability?: number; /** * The candidate's token string value. */ token?: string; /** * The candidate's token id value. */ tokenId?: number; } /** * Candidates with top log probabilities at each decoding step. */ export interface GoogleCloudAiplatformV1LogprobsResultTopCandidates { /** * Sorted by log probability in descending order. */ candidates?: GoogleCloudAiplatformV1LogprobsResultCandidate[]; } /** * Request message for VizierService.LookupStudy. */ export interface GoogleCloudAiplatformV1LookupStudyRequest { /** * Required. The user-defined display name of the Study */ displayName?: string; } /** * Specification of a single machine. */ export interface GoogleCloudAiplatformV1MachineSpec { /** * The number of accelerators to attach to the machine. */ acceleratorCount?: number; /** * Immutable. The type of accelerator(s) that may be attached to the machine * as per accelerator_count. */ acceleratorType?: | "ACCELERATOR_TYPE_UNSPECIFIED" | "NVIDIA_TESLA_K80" | "NVIDIA_TESLA_P100" | "NVIDIA_TESLA_V100" | "NVIDIA_TESLA_P4" | "NVIDIA_TESLA_T4" | "NVIDIA_TESLA_A100" | "NVIDIA_A100_80GB" | "NVIDIA_L4" | "NVIDIA_H100_80GB" | "TPU_V2" | "TPU_V3" | "TPU_V4_POD" | "TPU_V5_LITEPOD"; /** * Immutable. The type of the machine. See the [list of machine types * supported for * prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) * See the [list of machine types supported for custom * training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). * For DeployedModel this field is optional, and the default value is * `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this * field is required. */ machineType?: string; /** * Optional. Immutable. Configuration controlling how this resource pool * consumes reservation. */ reservationAffinity?: GoogleCloudAiplatformV1ReservationAffinity; /** * Immutable. 
The topology of the TPUs. Corresponds to the TPU topologies * available from GKE. (Example: tpu_topology: "2x2x1"). */ tpuTopology?: string; } /** * Manual batch tuning parameters. */ export interface GoogleCloudAiplatformV1ManualBatchTuningParameters { /** * Immutable. The number of the records (e.g. instances) of the operation * given in each batch to a machine replica. Machine type, and size of a * single record should be considered when setting this parameter, higher * value speeds up the batch operation's execution, but too high value will * result in a whole batch not fitting in a machine's memory, and the whole * operation will fail. The default value is 64. */ batchSize?: number; } /** * A message representing a Measurement of a Trial. A Measurement contains the * Metrics got by executing a Trial using suggested hyperparameter values. */ export interface GoogleCloudAiplatformV1Measurement { /** * Output only. Time that the Trial has been running at the point of this * Measurement. */ readonly elapsedDuration?: number /* Duration */; /** * Output only. A list of metrics got by evaluating the objective functions * using suggested Parameter values. */ readonly metrics?: GoogleCloudAiplatformV1MeasurementMetric[]; /** * Output only. The number of steps the machine learning model has been * trained for. Must be non-negative. */ readonly stepCount?: bigint; } /** * A message representing a metric in the measurement. */ export interface GoogleCloudAiplatformV1MeasurementMetric { /** * Output only. The ID of the Metric. The Metric should be defined in * StudySpec's Metrics. */ readonly metricId?: string; /** * Output only. The value for this metric. */ readonly value?: number; } /** * Request message for ModelService.MergeVersionAliases. */ export interface GoogleCloudAiplatformV1MergeVersionAliasesRequest { /** * Required. The set of version aliases to merge. The alias should be at most * 128 characters, and match `a-z{0,126}[a-z-0-9]`. Add the `-` prefix to an * alias means removing that alias from the version. `-` is NOT counted in the * 128 characters. Example: `-golden` means removing the `golden` alias from * the version. There is NO ordering in aliases, which means 1) The aliases * returned from GetModel API might not have the exactly same order from this * MergeVersionAliases API. 2) Adding and deleting the same alias in the * request is not recommended, and the 2 operations will be cancelled out. */ versionAliases?: string[]; } /** * Instance of a general MetadataSchema. */ export interface GoogleCloudAiplatformV1MetadataSchema { /** * Output only. Timestamp when this MetadataSchema was created. */ readonly createTime?: Date; /** * Description of the Metadata Schema */ description?: string; /** * Output only. The resource name of the MetadataSchema. */ readonly name?: string; /** * Required. The raw YAML string representation of the MetadataSchema. The * combination of [MetadataSchema.version] and the schema name given by * `title` in [MetadataSchema.schema] must be unique within a MetadataStore. * The schema is defined as an OpenAPI 3.0.2 [MetadataSchema * Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject) */ schema?: string; /** * The type of the MetadataSchema. This is a property that identifies which * metadata types will use the MetadataSchema. */ schemaType?: | "METADATA_SCHEMA_TYPE_UNSPECIFIED" | "ARTIFACT_TYPE" | "EXECUTION_TYPE" | "CONTEXT_TYPE"; /** * The version of the MetadataSchema. 
The version's format must match the * following regular expression: `^[0-9]+.+.+$`, which would allow to * order/compare different versions. Example: 1.0.0, 1.0.1, etc. */ schemaVersion?: string; } /** * Instance of a metadata store. Contains a set of metadata that can be * queried. */ export interface GoogleCloudAiplatformV1MetadataStore { /** * Output only. Timestamp when this MetadataStore was created. */ readonly createTime?: Date; /** * Optional. Dataplex integration settings. */ dataplexConfig?: GoogleCloudAiplatformV1MetadataStoreDataplexConfig; /** * Description of the MetadataStore. */ description?: string; /** * Customer-managed encryption key spec for a Metadata Store. If set, this * Metadata Store and all sub-resources of this Metadata Store are secured * using this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. The resource name of the MetadataStore instance. */ readonly name?: string; /** * Output only. State information of the MetadataStore. */ readonly state?: GoogleCloudAiplatformV1MetadataStoreMetadataStoreState; /** * Output only. Timestamp when this MetadataStore was last updated. */ readonly updateTime?: Date; } /** * Represents Dataplex integration settings. */ export interface GoogleCloudAiplatformV1MetadataStoreDataplexConfig { /** * Optional. Whether or not Data Lineage synchronization is enabled for * Vertex Pipelines. */ enabledPipelinesLineage?: boolean; } /** * Represents state information for a MetadataStore. */ export interface GoogleCloudAiplatformV1MetadataStoreMetadataStoreState { /** * The disk utilization of the MetadataStore in bytes. */ diskUtilizationBytes?: bigint; } function serializeGoogleCloudAiplatformV1MetadataStoreMetadataStoreState(data: any): GoogleCloudAiplatformV1MetadataStoreMetadataStoreState { return { ...data, diskUtilizationBytes: data["diskUtilizationBytes"] !== undefined ? String(data["diskUtilizationBytes"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1MetadataStoreMetadataStoreState(data: any): GoogleCloudAiplatformV1MetadataStoreMetadataStoreState { return { ...data, diskUtilizationBytes: data["diskUtilizationBytes"] !== undefined ? BigInt(data["diskUtilizationBytes"]) : undefined, }; } /** * Input for MetricX metric. */ export interface GoogleCloudAiplatformV1MetricxInput { /** * Required. Metricx instance. */ instance?: GoogleCloudAiplatformV1MetricxInstance; /** * Required. Spec for Metricx metric. */ metricSpec?: GoogleCloudAiplatformV1MetricxSpec; } /** * Spec for MetricX instance - The fields used for evaluation are dependent on * the MetricX version. */ export interface GoogleCloudAiplatformV1MetricxInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; /** * Optional. Source text in original language. */ source?: string; } /** * Spec for MetricX result - calculates the MetricX score for the given * instance using the version specified in the spec. */ export interface GoogleCloudAiplatformV1MetricxResult { /** * Output only. MetricX score. Range depends on version. */ readonly score?: number; } /** * Spec for MetricX metric. */ export interface GoogleCloudAiplatformV1MetricxSpec { /** * Optional. Source language in BCP-47 format. */ sourceLanguage?: string; /** * Optional. Target language in BCP-47 format. Covers both prediction and * reference. */ targetLanguage?: string; /** * Required. Which version to use for evaluation. 
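 * For example, a complete MetricX evaluation input using the types above
 * might be assembled like this (a sketch; the language codes and text values
 * are illustrative only):
 * ```ts
 * const metricxInput: GoogleCloudAiplatformV1MetricxInput = {
 *   metricSpec: {
 *     version: "METRICX_24_SRC_REF",
 *     sourceLanguage: "de",
 *     targetLanguage: "en",
 *   },
 *   instance: {
 *     source: "Guten Morgen",
 *     prediction: "Good morning",
 *     reference: "Good morning",
 *   },
 * };
 * ```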
*/ version?: | "METRICX_VERSION_UNSPECIFIED" | "METRICX_24_REF" | "METRICX_24_SRC" | "METRICX_24_SRC_REF"; } /** * Represents one resource that exists in automl.googleapis.com, * datalabeling.googleapis.com or ml.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResource { /** * Output only. Represents one Dataset in automl.googleapis.com. */ readonly automlDataset?: GoogleCloudAiplatformV1MigratableResourceAutomlDataset; /** * Output only. Represents one Model in automl.googleapis.com. */ readonly automlModel?: GoogleCloudAiplatformV1MigratableResourceAutomlModel; /** * Output only. Represents one Dataset in datalabeling.googleapis.com. */ readonly dataLabelingDataset?: GoogleCloudAiplatformV1MigratableResourceDataLabelingDataset; /** * Output only. Timestamp when the last migration attempt on this * MigratableResource started. Will not be set if there's no migration attempt * on this MigratableResource. */ readonly lastMigrateTime?: Date; /** * Output only. Timestamp when this MigratableResource was last updated. */ readonly lastUpdateTime?: Date; /** * Output only. Represents one Version in ml.googleapis.com. */ readonly mlEngineModelVersion?: GoogleCloudAiplatformV1MigratableResourceMlEngineModelVersion; } /** * Represents one Dataset in automl.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceAutomlDataset { /** * Full resource name of automl Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}`. */ dataset?: string; /** * The Dataset's display name in automl.googleapis.com. */ datasetDisplayName?: string; } /** * Represents one Model in automl.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceAutomlModel { /** * Full resource name of automl Model. Format: * `projects/{project}/locations/{location}/models/{model}`. */ model?: string; /** * The Model's display name in automl.googleapis.com. */ modelDisplayName?: string; } /** * Represents one Dataset in datalabeling.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceDataLabelingDataset { /** * The migratable AnnotatedDataset in datalabeling.googleapis.com belongs to * the data labeling Dataset. */ dataLabelingAnnotatedDatasets?: GoogleCloudAiplatformV1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset[]; /** * Full resource name of data labeling Dataset. Format: * `projects/{project}/datasets/{dataset}`. */ dataset?: string; /** * The Dataset's display name in datalabeling.googleapis.com. */ datasetDisplayName?: string; } /** * Represents one AnnotatedDataset in datalabeling.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset { /** * Full resource name of data labeling AnnotatedDataset. Format: * `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`. */ annotatedDataset?: string; /** * The AnnotatedDataset's display name in datalabeling.googleapis.com. */ annotatedDatasetDisplayName?: string; } /** * Represents one model Version in ml.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceMlEngineModelVersion { /** * The ml.googleapis.com endpoint that this model Version currently lives in. * Example values: * ml.googleapis.com * us-centrall-ml.googleapis.com * * europe-west4-ml.googleapis.com * asia-east1-ml.googleapis.com */ endpoint?: string; /** * Full resource name of ml engine model Version. Format: * `projects/{project}/models/{model}/versions/{version}`. 
*/ version?: string; } /** * Config of migrating one resource from automl.googleapis.com, * datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. */ export interface GoogleCloudAiplatformV1MigrateResourceRequest { /** * Config for migrating Dataset in automl.googleapis.com to Vertex AI's * Dataset. */ migrateAutomlDatasetConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlDatasetConfig; /** * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. */ migrateAutomlModelConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlModelConfig; /** * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's * Dataset. */ migrateDataLabelingDatasetConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfig; /** * Config for migrating Version in ml.googleapis.com to Vertex AI's Model. */ migrateMlEngineModelVersionConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateMlEngineModelVersionConfig; } /** * Config for migrating Dataset in automl.googleapis.com to Vertex AI's * Dataset. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlDatasetConfig { /** * Required. Full resource name of automl Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}`. */ dataset?: string; /** * Required. Display name of the Dataset in Vertex AI. System will pick a * display name if unspecified. */ datasetDisplayName?: string; } /** * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlModelConfig { /** * Required. Full resource name of automl Model. Format: * `projects/{project}/locations/{location}/models/{model}`. */ model?: string; /** * Optional. Display name of the model in Vertex AI. System will pick a * display name if unspecified. */ modelDisplayName?: string; } /** * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's * Dataset. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfig { /** * Required. Full resource name of data labeling Dataset. Format: * `projects/{project}/datasets/{dataset}`. */ dataset?: string; /** * Optional. Display name of the Dataset in Vertex AI. System will pick a * display name if unspecified. */ datasetDisplayName?: string; /** * Optional. Configs for migrating AnnotatedDataset in * datalabeling.googleapis.com to Vertex AI's SavedQuery. The specified * AnnotatedDatasets have to belong to the datalabeling Dataset. */ migrateDataLabelingAnnotatedDatasetConfigs?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig[]; } /** * Config for migrating AnnotatedDataset in datalabeling.googleapis.com to * Vertex AI's SavedQuery. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig { /** * Required. Full resource name of data labeling AnnotatedDataset. Format: * `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`. */ annotatedDataset?: string; } /** * Config for migrating version in ml.googleapis.com to Vertex AI's Model. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateMlEngineModelVersionConfig { /** * Required. The ml.googleapis.com endpoint that this model version should be * migrated from. 
Example values: * ml.googleapis.com * * us-central1-ml.googleapis.com * europe-west4-ml.googleapis.com * * asia-east1-ml.googleapis.com */ endpoint?: string; /** * Required. Display name of the model in Vertex AI. System will pick a * display name if unspecified. */ modelDisplayName?: string; /** * Required. Full resource name of ml engine model version. Format: * `projects/{project}/models/{model}/versions/{version}`. */ modelVersion?: string; } /** * Describes a successfully migrated resource. */ export interface GoogleCloudAiplatformV1MigrateResourceResponse { /** * Migrated Dataset's resource name. */ dataset?: string; /** * Before migration, the identifier in ml.googleapis.com, * automl.googleapis.com or datalabeling.googleapis.com. */ migratableResource?: GoogleCloudAiplatformV1MigratableResource; /** * Migrated Model's resource name. */ model?: string; } /** * A trained machine learning Model. */ export interface GoogleCloudAiplatformV1Model { /** * Immutable. The path to the directory containing the Model artifact and any * of its supporting files. Not required for AutoML Models. */ artifactUri?: string; /** * Optional. User input field to specify the base model source. Currently it * only supports specifying the Model Garden models and Genie models. */ baseModelSource?: GoogleCloudAiplatformV1ModelBaseModelSource; /** * Input only. The specification of the container that is to be used when * deploying this Model. The specification is ingested upon * ModelService.UploadModel, and all binaries it contains are copied and * stored internally by Vertex AI. Not required for AutoML Models. */ containerSpec?: GoogleCloudAiplatformV1ModelContainerSpec; /** * Output only. Timestamp when this Model was uploaded into Vertex AI. */ readonly createTime?: Date; /** * Stats of data used for training or evaluating the Model. Only populated * when the Model is trained by a TrainingPipeline with data_input_config. */ dataStats?: GoogleCloudAiplatformV1ModelDataStats; /** * Output only. The pointers to DeployedModels created from this Model. Note * that Model could have been deployed to Endpoints in different Locations. */ readonly deployedModels?: GoogleCloudAiplatformV1DeployedModelRef[]; /** * The description of the Model. */ description?: string; /** * Required. The display name of the Model. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for a Model. If set, this Model and * all sub-resources of this Model will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The default explanation specification for this Model. The Model can be * used for requesting explanation after being deployed if it is populated. * The Model can be used for batch explanation if it is populated. All fields * of the explanation_spec can be overridden by explanation_spec of * DeployModelRequest.deployed_model, or explanation_spec of * BatchPredictionJob. If the default explanation specification is not set for * this Model, this Model can still be used for requesting explanation by * setting explanation_spec of DeployModelRequest.deployed_model and for batch * explanation by setting explanation_spec of BatchPredictionJob. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * The labels with user-defined metadata to organize your Models. 
Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Immutable. Additional information about the Model; the schema of the * metadata can be found in metadata_schema. Unset if the Model does not have * any additional information. */ metadata?: any; /** * Output only. The resource name of the Artifact that was created in * MetadataStore when creating the Model. The Artifact resource name pattern * is * `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. */ readonly metadataArtifact?: string; /** * Immutable. Points to a YAML file stored on Google Cloud Storage describing * additional information about the Model, that is specific to it. Unset if * the Model does not have any additional information. The schema is defined * as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * AutoML Models always have this field populated by Vertex AI; if no * additional metadata is needed, this field is set to an empty string. Note: * The URI given on output will be immutable and probably different, including * the URI scheme, than the one given on input. The output URI will point to a * location where the user only has read access. */ metadataSchemaUri?: string; /** * Output only. Source of a model. It can either be automl training pipeline, * custom training pipeline, BigQuery ML, or saved and tuned from Genie or * Model Garden. */ readonly modelSourceInfo?: GoogleCloudAiplatformV1ModelSourceInfo; /** * The resource name of the Model. */ name?: string; /** * Output only. If this Model is a copy of another Model, this contains info * about the original. */ readonly originalModelInfo?: GoogleCloudAiplatformV1ModelOriginalModelInfo; /** * Optional. This field is populated if the model is produced by a pipeline * job. */ pipelineJob?: string; /** * The schemata that describe formats of the Model's predictions and * explanations as given and returned via PredictionService.Predict and * PredictionService.Explain. */ predictSchemata?: GoogleCloudAiplatformV1PredictSchemata; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. When this Model is deployed, its prediction resources are * described by the `prediction_resources` field of the * Endpoint.deployed_models object. Because not all Models support all * resource configuration types, the configuration types this Model supports * are listed here. If no configuration types are listed, the Model cannot be * deployed to an Endpoint and does not support online predictions * (PredictionService.Predict or PredictionService.Explain). Such a Model can * serve predictions by using a BatchPredictionJob, if it has at least one * entry each in supported_input_storage_formats and * supported_output_storage_formats. */ readonly supportedDeploymentResourcesTypes?: (| "DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED" | "DEDICATED_RESOURCES" | "AUTOMATIC_RESOURCES" | "SHARED_RESOURCES")[]; /** * Output only. The formats in which this Model may be exported. If empty, * this Model is not available for export. 
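 * A small sketch of how a caller might consult these capability fields
 * (`model` here is any GoogleCloudAiplatformV1Model value):
 * ```ts
 * function describeModelCapabilities(model: GoogleCloudAiplatformV1Model): string[] {
 *   const notes: string[] = [];
 *   if ((model.supportedExportFormats ?? []).length === 0) {
 *     notes.push("not available for export");
 *   }
 *   if ((model.supportedDeploymentResourcesTypes ?? []).length === 0) {
 *     notes.push("cannot be deployed for online prediction");
 *   }
 *   return notes;
 * }
 * ```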
*/ readonly supportedExportFormats?: GoogleCloudAiplatformV1ModelExportFormat[]; /** * Output only. The formats this Model supports in * BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri * exists, the instances should be given as per that schema. The possible * formats are: * `jsonl` The JSON Lines format, where each instance is a * single line. Uses GcsSource. * `csv` The CSV format, where each instance is * a single comma-separated line. The first line in the file is the header, * containing comma-separated field names. Uses GcsSource. * `tf-record` The * TFRecord format, where each instance is a single record in tfrecord syntax. * Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is * gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in * BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the * location of an instance to process, uses `gcs_source` field of the * InputConfig object. If this Model doesn't support any of these formats it * means it cannot be used with a BatchPredictionJob. However, if it has * supported_deployment_resources_types, it could serve online predictions by * using PredictionService.Predict or PredictionService.Explain. */ readonly supportedInputStorageFormats?: string[]; /** * Output only. The formats this Model supports in * BatchPredictionJob.output_config. If both * PredictSchemata.instance_schema_uri and * PredictSchemata.prediction_schema_uri exist, the predictions are returned * together with their instances. In other words, the prediction has the * original instance data first, followed by the actual prediction content (as * per the schema). The possible formats are: * `jsonl` The JSON Lines format, * where each prediction is a single line. Uses GcsDestination. * `csv` The * CSV format, where each prediction is a single comma-separated line. The * first line in the file is the header, containing comma-separated field * names. Uses GcsDestination. * `bigquery` Each prediction is a single row in * a BigQuery table, uses BigQueryDestination . If this Model doesn't support * any of these formats it means it cannot be used with a BatchPredictionJob. * However, if it has supported_deployment_resources_types, it could serve * online predictions by using PredictionService.Predict or * PredictionService.Explain. */ readonly supportedOutputStorageFormats?: string[]; /** * Output only. The resource name of the TrainingPipeline that uploaded this * Model, if any. */ readonly trainingPipeline?: string; /** * Output only. Timestamp when this Model was most recently updated. */ readonly updateTime?: Date; /** * User provided version aliases so that a model version can be referenced * via alias (i.e. * `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` * instead of auto-generated version id (i.e. * `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. * The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default * version alias will be created for the first version of the model, and there * must be exactly one default version alias for a model. */ versionAliases?: string[]; /** * Output only. Timestamp when this version was created. */ readonly versionCreateTime?: Date; /** * The description of this version. */ versionDescription?: string; /** * Output only. Immutable. The version ID of the model. A new version is * committed when a new model version is uploaded or trained under an existing * model id. 
It is an auto-incrementing decimal number in string * representation. */ readonly versionId?: string; /** * Output only. Timestamp when this version was most recently updated. */ readonly versionUpdateTime?: Date; } function serializeGoogleCloudAiplatformV1Model(data: any): GoogleCloudAiplatformV1Model { return { ...data, containerSpec: data["containerSpec"] !== undefined ? serializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, dataStats: data["dataStats"] !== undefined ? serializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Model(data: any): GoogleCloudAiplatformV1Model { return { ...data, containerSpec: data["containerSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, dataStats: data["dataStats"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, versionCreateTime: data["versionCreateTime"] !== undefined ? new Date(data["versionCreateTime"]) : undefined, versionUpdateTime: data["versionUpdateTime"] !== undefined ? new Date(data["versionUpdateTime"]) : undefined, }; } /** * User input field to specify the base model source. Currently it only * supports specifing the Model Garden models and Genie models. */ export interface GoogleCloudAiplatformV1ModelBaseModelSource { /** * Information about the base model of Genie models. */ genieSource?: GoogleCloudAiplatformV1GenieSource; /** * Source information of Model Garden models. */ modelGardenSource?: GoogleCloudAiplatformV1ModelGardenSource; } /** * Specification of a container for serving predictions. Some fields in this * message correspond to fields in the [Kubernetes Container v1 core * specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ export interface GoogleCloudAiplatformV1ModelContainerSpec { /** * Immutable. Specifies arguments for the command that runs when the * container starts. This overrides the container's * [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify * this field as an array of executable and arguments, similar to a Docker * `CMD`'s "default parameters" form. If you don't specify this field but do * specify the command field, then the command from the `command` field runs * without any additional arguments. See the [Kubernetes documentation about * how the `command` and `args` fields interact with a container's * `ENTRYPOINT` and * `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). * If you don't specify this field and don't specify the `command` field, then * the container's * [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and * `CMD` determine what runs based on their default behavior. See the Docker * documentation about [how `CMD` and `ENTRYPOINT` * interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). * In this field, you can reference [environment variables set by Vertex * AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) * and environment variables set in the env field. You cannot reference * environment variables set in the Docker image. 
In order for environment * variables to be expanded, reference them by using the following syntax: $( * VARIABLE_NAME) Note that this differs from Bash variable expansion, which * does not use parentheses. If a variable cannot be resolved, the reference * in the input string is used unchanged. To avoid variable expansion, you can * escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field * corresponds to the `args` field of the Kubernetes Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ args?: string[]; /** * Immutable. Specifies the command that runs when the container starts. This * overrides the container's * [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). * Specify this field as an array of executable and arguments, similar to a * Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not * specify this field, then the container's `ENTRYPOINT` runs, in conjunction * with the args field or the container's * [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either * exists. If this field is not specified and the container does not have an * `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and * `ENTRYPOINT` * interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). * If you specify this field, then you can also specify the `args` field to * provide additional arguments for this command. However, if you specify this * field, then the container's `CMD` is ignored. See the [Kubernetes * documentation about how the `command` and `args` fields interact with a * container's `ENTRYPOINT` and * `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). * In this field, you can reference [environment variables set by Vertex * AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) * and environment variables set in the env field. You cannot reference * environment variables set in the Docker image. In order for environment * variables to be expanded, reference them by using the following syntax: $( * VARIABLE_NAME) Note that this differs from Bash variable expansion, which * does not use parentheses. If a variable cannot be resolved, the reference * in the input string is used unchanged. To avoid variable expansion, you can * escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field * corresponds to the `command` field of the Kubernetes Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ command?: string[]; /** * Immutable. Deployment timeout. Limit for deployment timeout is 2 hours. */ deploymentTimeout?: number /* Duration */; /** * Immutable. List of environment variables to set in the container. After * the container starts running, code running in the container can read these * environment variables. Additionally, the command and args fields can * reference these variables. Later entries in this list can also reference * earlier entries. For example, the following example sets the variable * `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": * "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" } ] ``` If you switch * the order of the variables in the example, then the expansion does not * occur. 
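 * The same two-variable example expressed with this client's types (a
 * sketch; GoogleCloudAiplatformV1EnvVar is assumed to mirror the JSON
 * `name`/`value` shape shown above):
 * ```ts
 * const env: GoogleCloudAiplatformV1EnvVar[] = [
 *   { name: "VAR_1", value: "foo" },
 *   { name: "VAR_2", value: "$(VAR_1) bar" },
 * ];
 * ```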
This field corresponds to the `env` field of the Kubernetes * Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Immutable. List of ports to expose from the container. Vertex AI sends * gRPC prediction requests that it receives to the first port on this list. * Vertex AI also sends liveness and health checks to this port. If you do not * specify this field, gRPC requests to the container will be disabled. Vertex * AI does not use ports other than the first one listed. This field * corresponds to the `ports` field of the Kubernetes Containers v1 core API. */ grpcPorts?: GoogleCloudAiplatformV1Port[]; /** * Immutable. Specification for Kubernetes readiness probe. */ healthProbe?: GoogleCloudAiplatformV1Probe; /** * Immutable. HTTP path on the container to send health checks to. Vertex AI * intermittently sends GET requests to this path on the container's IP * address and port to check that the container is healthy. Read more about * [health * checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). * For example, if you set this field to `/bar`, then Vertex AI intermittently * sends a GET request to the `/bar` path on the port of your container * specified by the first value of this `ModelContainerSpec`'s ports field. If * you don't specify this field, it defaults to the following value when you * deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ * DEPLOYED_MODEL:predict The placeholders in this value are replaced as * follows: * ENDPOINT: The last segment (following `endpoints/`)of the * Endpoint.name][] field of the Endpoint where this Model has been deployed. * (Vertex AI makes this value available to your container code as the * [`AIP_ENDPOINT_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes * this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` * environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ healthRoute?: string; /** * Required. Immutable. URI of the Docker image to be used as the custom * container for serving predictions. This URI must identify an image in * Artifact Registry or Container Registry. Learn more about the [container * publishing * requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), * including permissions requirements for the Vertex AI Service Agent. The * container image is ingested upon ModelService.UploadModel, stored * internally, and this original path is afterwards not used. To learn about * the requirements for the Docker image itself, see [Custom container * requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). * You can use the URI to one of Vertex AI's [pre-built container images for * prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) * in this field. */ imageUri?: string; /** * Immutable. List of ports to expose from the container. Vertex AI sends any * prediction requests that it receives to the first port on this list. Vertex * AI also sends [liveness and health * checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) * to this port. 
If you do not specify this field, it defaults to the following * value: ```json [ { "containerPort": 8080 } ] ``` Vertex AI does not use * ports other than the first one listed. This field corresponds to the * `ports` field of the Kubernetes Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ ports?: GoogleCloudAiplatformV1Port[]; /** * Immutable. HTTP path on the container to send prediction requests to. * Vertex AI forwards requests sent using projects.locations.endpoints.predict * to this path on the container's IP address and port. Vertex AI then returns * the container's response in the API response. For example, if you set this * field to `/foo`, then when Vertex AI receives a prediction request, it * forwards the request body in a POST request to the `/foo` path on the port * of your container specified by the first value of this * `ModelContainerSpec`'s ports field. If you don't specify this field, it * defaults to the following value when you deploy this Model to an Endpoint: * /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The * placeholders in this value are replaced as follows: * ENDPOINT: The last * segment (following `endpoints/`) of the Endpoint.name field of the * Endpoint where this Model has been deployed. (Vertex AI makes this value * available to your container code as the [`AIP_ENDPOINT_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes * this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` * environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ predictRoute?: string; /** * Immutable. The amount of the VM memory to reserve as the shared memory for * the model in megabytes. */ sharedMemorySizeMb?: bigint; /** * Immutable. Specification for Kubernetes startup probe. */ startupProbe?: GoogleCloudAiplatformV1Probe; } function serializeGoogleCloudAiplatformV1ModelContainerSpec(data: any): GoogleCloudAiplatformV1ModelContainerSpec { return { ...data, deploymentTimeout: data["deploymentTimeout"] !== undefined ? data["deploymentTimeout"] : undefined, sharedMemorySizeMb: data["sharedMemorySizeMb"] !== undefined ? String(data["sharedMemorySizeMb"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelContainerSpec(data: any): GoogleCloudAiplatformV1ModelContainerSpec { return { ...data, deploymentTimeout: data["deploymentTimeout"] !== undefined ? data["deploymentTimeout"] : undefined, sharedMemorySizeMb: data["sharedMemorySizeMb"] !== undefined ? BigInt(data["sharedMemorySizeMb"]) : undefined, }; } /** * Stats of data used to train or evaluate the Model. */ export interface GoogleCloudAiplatformV1ModelDataStats { /** * Number of Annotations that are used for evaluating this Model. If the * Model is evaluated multiple times, this will be the number of test * Annotations used by the first evaluation. If the Model is not evaluated, * the number is 0. */ testAnnotationsCount?: bigint; /** * Number of DataItems that were used for evaluating this Model. If the Model * is evaluated multiple times, this will be the number of test DataItems used * by the first evaluation. If the Model is not evaluated, the number is 0. */ testDataItemsCount?: bigint; /** * Number of Annotations that are used for training this Model. 
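 * Like the other counts in GoogleCloudAiplatformV1ModelDataStats, this is a
 * `bigint` that the serialize/deserialize helpers below carry as a decimal
 * string in JSON. For example (a sketch):
 * ```ts
 * const stats: GoogleCloudAiplatformV1ModelDataStats = {
 *   trainingAnnotationsCount: 1200n,
 * };
 * // serializeGoogleCloudAiplatformV1ModelDataStats(stats) produces an object
 * // whose trainingAnnotationsCount is the string "1200".
 * ```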
*/ trainingAnnotationsCount?: bigint; /** * Number of DataItems that were used for training this Model. */ trainingDataItemsCount?: bigint; /** * Number of Annotations that are used for validating this Model during * training. */ validationAnnotationsCount?: bigint; /** * Number of DataItems that were used for validating this Model during * training. */ validationDataItemsCount?: bigint; } function serializeGoogleCloudAiplatformV1ModelDataStats(data: any): GoogleCloudAiplatformV1ModelDataStats { return { ...data, testAnnotationsCount: data["testAnnotationsCount"] !== undefined ? String(data["testAnnotationsCount"]) : undefined, testDataItemsCount: data["testDataItemsCount"] !== undefined ? String(data["testDataItemsCount"]) : undefined, trainingAnnotationsCount: data["trainingAnnotationsCount"] !== undefined ? String(data["trainingAnnotationsCount"]) : undefined, trainingDataItemsCount: data["trainingDataItemsCount"] !== undefined ? String(data["trainingDataItemsCount"]) : undefined, validationAnnotationsCount: data["validationAnnotationsCount"] !== undefined ? String(data["validationAnnotationsCount"]) : undefined, validationDataItemsCount: data["validationDataItemsCount"] !== undefined ? String(data["validationDataItemsCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDataStats(data: any): GoogleCloudAiplatformV1ModelDataStats { return { ...data, testAnnotationsCount: data["testAnnotationsCount"] !== undefined ? BigInt(data["testAnnotationsCount"]) : undefined, testDataItemsCount: data["testDataItemsCount"] !== undefined ? BigInt(data["testDataItemsCount"]) : undefined, trainingAnnotationsCount: data["trainingAnnotationsCount"] !== undefined ? BigInt(data["trainingAnnotationsCount"]) : undefined, trainingDataItemsCount: data["trainingDataItemsCount"] !== undefined ? BigInt(data["trainingDataItemsCount"]) : undefined, validationAnnotationsCount: data["validationAnnotationsCount"] !== undefined ? BigInt(data["validationAnnotationsCount"]) : undefined, validationDataItemsCount: data["validationDataItemsCount"] !== undefined ? BigInt(data["validationDataItemsCount"]) : undefined, }; } /** * ModelDeploymentMonitoringBigQueryTable specifies the BigQuery table name as * well as some information of the logs stored in this table. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringBigQueryTable { /** * The created BigQuery table to store logs. Customer could do their own * query & analysis. Format: `bq://.model_deployment_monitoring_._` */ bigqueryTablePath?: string; /** * The source of log. */ logSource?: | "LOG_SOURCE_UNSPECIFIED" | "TRAINING" | "SERVING"; /** * The type of log. */ logType?: | "LOG_TYPE_UNSPECIFIED" | "PREDICT" | "EXPLAIN"; /** * Output only. The schema version of the request/response logging BigQuery * table. Default to v1 if unset. */ readonly requestResponseLoggingSchemaVersion?: string; } /** * Represents a job that runs periodically to monitor the deployed models in an * endpoint. It will analyze the logged training & prediction data to detect any * abnormal behaviors. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringJob { /** * YAML schema file uri describing the format of a single instance that you * want Tensorflow Data Validation (TFDV) to analyze. If this field is empty, * all the feature data types are inferred from predict_instance_schema_uri, * meaning that TFDV will use the data in the exact format(data type) as * prediction request/response. 
If there are any data type differences between * predict instance and TFDV instance, this field can be used to override the * schema. For models trained with Vertex AI, this field must be set as all * the fields in predict instance formatted as string. */ analysisInstanceSchemaUri?: string; /** * Output only. The created bigquery tables for the job under customer * project. Customer could do their own query & analysis. There could be 4 log * tables in maximum: 1. Training data logging predict request/response 2. * Serving data logging predict request/response */ readonly bigqueryTables?: GoogleCloudAiplatformV1ModelDeploymentMonitoringBigQueryTable[]; /** * Output only. Timestamp when this ModelDeploymentMonitoringJob was created. */ readonly createTime?: Date; /** * Required. The user-defined name of the ModelDeploymentMonitoringJob. The * name can be up to 128 characters long and can consist of any UTF-8 * characters. Display name of a ModelDeploymentMonitoringJob. */ displayName?: string; /** * If true, the scheduled monitoring pipeline logs are sent to Google Cloud * Logging, including pipeline status and anomalies detected. Please note the * logs incur cost, which are subject to [Cloud Logging * pricing](https://cloud.google.com/logging#pricing). */ enableMonitoringPipelineLogs?: boolean; /** * Customer-managed encryption key spec for a ModelDeploymentMonitoringJob. * If set, this ModelDeploymentMonitoringJob and all sub-resources of this * ModelDeploymentMonitoringJob will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Required. Endpoint resource name. Format: * `projects/{project}/locations/{location}/endpoints/{endpoint}` */ endpoint?: string; /** * Output only. Only populated when the job's state is `JOB_STATE_FAILED` or * `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * The labels with user-defined metadata to organize your * ModelDeploymentMonitoringJob. Label keys and values can be no longer than * 64 characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * Output only. Latest triggered monitoring pipeline metadata. */ readonly latestMonitoringPipelineMetadata?: GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata; /** * Required. Sample Strategy for logging. */ loggingSamplingStrategy?: GoogleCloudAiplatformV1SamplingStrategy; /** * The TTL of BigQuery tables in user projects which stores logs. A day is * the basic unit of the TTL and we take the ceil of TTL/86400(a day). e.g. { * second: 3600} indicates ttl = 1 day. */ logTtl?: number /* Duration */; /** * Required. The config for monitoring objectives. This is a per * DeployedModel config. Each DeployedModel needs to be configured separately. */ modelDeploymentMonitoringObjectiveConfigs?: GoogleCloudAiplatformV1ModelDeploymentMonitoringObjectiveConfig[]; /** * Required. Schedule config for running the monitoring job. */ modelDeploymentMonitoringScheduleConfig?: GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig; /** * Alert config for model monitoring. */ modelMonitoringAlertConfig?: GoogleCloudAiplatformV1ModelMonitoringAlertConfig; /** * Output only. Resource name of a ModelDeploymentMonitoringJob. */ readonly name?: string; /** * Output only. 
Timestamp when this monitoring pipeline will be scheduled to * run for the next round. */ readonly nextScheduleTime?: Date; /** * YAML schema file uri describing the format of a single instance, which are * given to format this Endpoint's prediction (and explanation). If not set, * we will generate predict schema from collected predict requests. */ predictInstanceSchemaUri?: string; /** * Sample Predict instance, same format as PredictRequest.instances, this can * be set as a replacement of * ModelDeploymentMonitoringJob.predict_instance_schema_uri. If not set, we * will generate predict schema from collected predict requests. */ samplePredictInstance?: any; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Schedule state when the monitoring job is in Running state. */ readonly scheduleState?: | "MONITORING_SCHEDULE_STATE_UNSPECIFIED" | "PENDING" | "OFFLINE" | "RUNNING"; /** * Output only. The detailed state of the monitoring job. When the job is * still creating, the state will be 'PENDING'. Once the job is successfully * created, the state will be 'RUNNING'. Pause the job, the state will be * 'PAUSED'. Resume the job, the state will return to 'RUNNING'. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Stats anomalies base folder path. */ statsAnomaliesBaseDirectory?: GoogleCloudAiplatformV1GcsDestination; /** * Output only. Timestamp when this ModelDeploymentMonitoringJob was updated * most recently. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJob { return { ...data, logTtl: data["logTtl"] !== undefined ? data["logTtl"] : undefined, modelDeploymentMonitoringScheduleConfig: data["modelDeploymentMonitoringScheduleConfig"] !== undefined ? serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data["modelDeploymentMonitoringScheduleConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, latestMonitoringPipelineMetadata: data["latestMonitoringPipelineMetadata"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(data["latestMonitoringPipelineMetadata"]) : undefined, logTtl: data["logTtl"] !== undefined ? data["logTtl"] : undefined, modelDeploymentMonitoringScheduleConfig: data["modelDeploymentMonitoringScheduleConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data["modelDeploymentMonitoringScheduleConfig"]) : undefined, nextScheduleTime: data["nextScheduleTime"] !== undefined ? new Date(data["nextScheduleTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * All metadata of most recent monitoring pipelines. 
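 * The `runTime` field is carried as a JavaScript `Date` and converted to and
 * from its ISO-8601 string form by the serialize/deserialize helpers below.
 * For example (a sketch):
 * ```ts
 * const pipelineMetadata: GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata = {
 *   runTime: new Date("2022-01-08T14:30:00Z"),
 * };
 * // serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(pipelineMetadata)
 * // yields runTime === "2022-01-08T14:30:00.000Z".
 * ```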
*/ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { /** * The time of the most recent monitoring pipeline that is related to this * run. */ runTime?: Date; /** * The status of the most recent monitoring pipeline. */ status?: GoogleRpcStatus; } function serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { return { ...data, runTime: data["runTime"] !== undefined ? data["runTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { return { ...data, runTime: data["runTime"] !== undefined ? new Date(data["runTime"]) : undefined, }; } /** * ModelDeploymentMonitoringObjectiveConfig contains the pair of * deployed_model_id to ModelMonitoringObjectiveConfig. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringObjectiveConfig { /** * The DeployedModel ID of the objective config. */ deployedModelId?: string; /** * The objective config for the model monitoring job of this deployed * model. */ objectiveConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfig; } /** * The config for scheduling the monitoring job. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig { /** * Required. The model monitoring job scheduling interval. It will be rounded * up to the next full hour. This defines how often the monitoring jobs are * triggered. */ monitorInterval?: number /* Duration */; /** * The time window of the prediction data being included in each prediction * dataset. This window specifies how long the data should be collected from * historical model results for each run. If not set, * ModelDeploymentMonitoringScheduleConfig.monitor_interval will be used. e.g. * If currently the cutoff time is 2022-01-08 14:30:00 and the monitor_window * is set to be 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 * 14:30:00 will be retrieved and aggregated to calculate the monitoring * statistics. */ monitorWindow?: number /* Duration */; } function serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig { return { ...data, monitorInterval: data["monitorInterval"] !== undefined ? data["monitorInterval"] : undefined, monitorWindow: data["monitorWindow"] !== undefined ? data["monitorWindow"] : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig { return { ...data, monitorInterval: data["monitorInterval"] !== undefined ? data["monitorInterval"] : undefined, monitorWindow: data["monitorWindow"] !== undefined ? data["monitorWindow"] : undefined, }; } /** * A collection of metrics calculated by comparing Model's predictions on all * of the test data against annotations from the test data. */ export interface GoogleCloudAiplatformV1ModelEvaluation { /** * Points to a YAML file stored on Google Cloud Storage describing * EvaluatedDataItemView.predictions, EvaluatedDataItemView.ground_truths, * EvaluatedAnnotation.predictions, and EvaluatedAnnotation.ground_truths. 
The * schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * This field is not populated if there are neither EvaluatedDataItemViews nor * EvaluatedAnnotations under this ModelEvaluation. */ annotationSchemaUri?: string; /** * Output only. Timestamp when this ModelEvaluation was created. */ readonly createTime?: Date; /** * Points to a YAML file stored on Google Cloud Storage describing * EvaluatedDataItemView.data_item_payload and * EvaluatedAnnotation.data_item_payload. The schema is defined as an OpenAPI * 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * This field is not populated if there are neither EvaluatedDataItemViews nor * EvaluatedAnnotations under this ModelEvaluation. */ dataItemSchemaUri?: string; /** * The display name of the ModelEvaluation. */ displayName?: string; /** * Describes the values of ExplanationSpec that are used for explaining the * predicted values on the evaluated data. */ explanationSpecs?: GoogleCloudAiplatformV1ModelEvaluationModelEvaluationExplanationSpec[]; /** * The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from * Managed Pipeline, metadata contains a structured value with keys of * "pipeline_job_id", "evaluation_dataset_type", "evaluation_dataset_path", * "row_based_metrics_path". */ metadata?: any; /** * Evaluation metrics of the Model. The schema of the metrics is stored in * metrics_schema_uri */ metrics?: any; /** * Points to a YAML file stored on Google Cloud Storage describing the * metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 * [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ metricsSchemaUri?: string; /** * Aggregated explanation metrics for the Model's prediction output over the * data this ModelEvaluation uses. This field is populated only if the Model * is evaluated with explanations, and only for AutoML tabular Models. */ modelExplanation?: GoogleCloudAiplatformV1ModelExplanation; /** * Output only. The resource name of the ModelEvaluation. */ readonly name?: string; /** * All possible dimensions of ModelEvaluationSlices. The dimensions can be * used as the filter of the ModelService.ListModelEvaluationSlices request, * in the form of `slice.dimension = `. */ sliceDimensions?: string[]; } export interface GoogleCloudAiplatformV1ModelEvaluationModelEvaluationExplanationSpec { /** * Explanation spec details. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * Explanation type. For AutoML Image Classification models, possible values * are: * `image-integrated-gradients` * `image-xrai` */ explanationType?: string; } /** * A collection of metrics calculated by comparing Model's predictions on a * slice of the test data against ground truth annotations. */ export interface GoogleCloudAiplatformV1ModelEvaluationSlice { /** * Output only. Timestamp when this ModelEvaluationSlice was created. */ readonly createTime?: Date; /** * Output only. Sliced evaluation metrics of the Model. The schema of the * metrics is stored in metrics_schema_uri */ readonly metrics?: any; /** * Output only. Points to a YAML file stored on Google Cloud Storage * describing the metrics of this ModelEvaluationSlice. The schema is defined * as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). 
*/ readonly metricsSchemaUri?: string; /** * Output only. Aggregated explanation metrics for the Model's prediction * output over the data this ModelEvaluation uses. This field is populated * only if the Model is evaluated with explanations, and only for tabular * Models. */ readonly modelExplanation?: GoogleCloudAiplatformV1ModelExplanation; /** * Output only. The resource name of the ModelEvaluationSlice. */ readonly name?: string; /** * Output only. The slice of the test data that is used to evaluate the * Model. */ readonly slice?: GoogleCloudAiplatformV1ModelEvaluationSliceSlice; } /** * Definition of a slice. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSlice { /** * Output only. The dimension of the slice. Well-known dimensions are: * * `annotationSpec`: This slice is on the test data that has either ground * truth or prediction with AnnotationSpec.display_name equals to value. * * `slice`: This slice is a user customized slice defined by its SliceSpec. */ readonly dimension?: string; /** * Output only. Specification for how the data was sliced. */ readonly sliceSpec?: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpec; /** * Output only. The value of the dimension in this slice. */ readonly value?: string; } /** * Specification for how the data should be sliced. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpec { /** * Mapping configuration for this SliceSpec. The key is the name of the * feature. By default, the key will be prefixed by "instance" as a dictionary * prefix for Vertex Batch Predictions output format. */ configs?: { [key: string]: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecSliceConfig }; } /** * A range of values for slice(s). `low` is inclusive, `high` is exclusive. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecRange { /** * Exclusive high value for the range. */ high?: number; /** * Inclusive low value for the range. */ low?: number; } /** * Specification message containing the config for this SliceSpec. When `kind` * is selected as `value` and/or `range`, only a single slice will be computed. * When `all_values` is present, a separate slice will be computed for each * possible label/value for the corresponding key in `config`. Examples, with * feature zip_code with values 12345, 23334, 88888 and feature country with * values "US", "Canada", "Mexico" in the dataset: Example 1: { "zip_code": { * "value": { "float_value": 12345.0 } } } A single slice for any data with * zip_code 12345 in the dataset. Example 2: { "zip_code": { "range": { "low": * 12345, "high": 20000 } } } A single slice containing data where the zip_codes * between 12345 and 20000 For this example, data with the zip_code of 12345 * will be in this slice. Example 3: { "zip_code": { "range": { "low": 10000, * "high": 20000 } }, "country": { "value": { "string_value": "US" } } } A * single slice containing data where the zip_codes between 10000 and 20000 has * the country "US". For this example, data with the zip_code of 12345 and * country "US" will be in this slice. Example 4: { "country": {"all_values": { * "value": true } } } Three slices are computed, one for each unique country in * the dataset. Example 5: { "country": { "all_values": { "value": true } }, * "zip_code": { "value": { "float_value": 12345.0 } } } Three slices are * computed, one for each unique country in the dataset where the zip_code is * also 12345. 
For this example, data with zip_code 12345 and country "US" will * be in one slice, zip_code 12345 and country "Canada" in another slice, and * zip_code 12345 and country "Mexico" in another slice, totaling 3 slices. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecSliceConfig { /** * If all_values is set to true, then all possible labels of the keyed * feature will have another slice computed. Example: * `{"all_values":{"value":true}}` */ allValues?: boolean; /** * A range of values for a numerical feature. Example: * `{"range":{"low":10000.0,"high":50000.0}}` will capture 12345 and 23334 in * the slice. */ range?: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecRange; /** * A unique specific value for a given feature. Example: `{ "value": { * "string_value": "12345" } }` */ value?: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecValue; } /** * Single value that supports strings and floats. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecValue { /** * Float type. */ floatValue?: number; /** * String type. */ stringValue?: string; } /** * Aggregated explanation metrics for a Model over a set of instances. */ export interface GoogleCloudAiplatformV1ModelExplanation { /** * Output only. Aggregated attributions explaining the Model's prediction * outputs over the set of instances. The attributions are grouped by outputs. * For Models that predict only one output, such as regression Models that * predict only one score, there is only one attribution that explains the * predicted output. For Models that predict multiple outputs, such as * multiclass Models that predict multiple classes, each element explains one * specific item. Attribution.output_index can be used to identify which * output this attribution is explaining. The baselineOutputValue, * instanceOutputValue and featureAttributions fields are averaged over the * test data. NOTE: Currently AutoML tabular classification Models produce * only one attribution, which averages attributions over all the classes it * predicts. Attribution.approximation_error is not populated. */ readonly meanAttributions?: GoogleCloudAiplatformV1Attribution[]; } /** * Represents an export format supported by the Model. All formats export to * Google Cloud Storage. */ export interface GoogleCloudAiplatformV1ModelExportFormat { /** * Output only. The content of this Model that may be exported. */ readonly exportableContents?: ("EXPORTABLE_CONTENT_UNSPECIFIED" | "ARTIFACT" | "IMAGE")[]; /** * Output only. The ID of the export format. The possible format IDs are: * * `tflite` Used for Android mobile devices. * `edgetpu-tflite` Used for [Edge * TPU](https://cloud.google.com/edge-tpu/) devices. * `tf-saved-model` A * TensorFlow model in SavedModel format. * `tf-js` A * [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in * the browser and in Node.js using JavaScript. * `core-ml` Used for iOS * mobile devices. * `custom-trained` A Model that was uploaded or trained by * custom code. */ readonly id?: string; } /** * Contains information about the source of the models generated from Model * Garden. */ export interface GoogleCloudAiplatformV1ModelGardenSource { /** * Required. The model garden source model resource name. */ publicModelName?: string; } /** * The alert config for model monitoring. */ export interface GoogleCloudAiplatformV1ModelMonitoringAlertConfig { /** * Email alert config.
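 *
 * A minimal illustrative sketch of an alert config that uses this field; the
 * email address is hypothetical:
 *
 * ```ts
 * const alerting: GoogleCloudAiplatformV1ModelMonitoringAlertConfig = {
 *   emailAlertConfig: { userEmails: ["ml-alerts@example.com"] },
 *   enableLogging: true,
 * };
 * ```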
*/ emailAlertConfig?: GoogleCloudAiplatformV1ModelMonitoringAlertConfigEmailAlertConfig; /** * Dump the anomalies to Cloud Logging. The anomalies will be put to json * payload encoded from proto ModelMonitoringStatsAnomalies. This can be * further synced to Pub/Sub or any other services supported by Cloud Logging. */ enableLogging?: boolean; /** * Resource names of the NotificationChannels to send alert. Must be of the * format `projects//notificationChannels/` */ notificationChannels?: string[]; } /** * The config for email alert. */ export interface GoogleCloudAiplatformV1ModelMonitoringAlertConfigEmailAlertConfig { /** * The email addresses to send the alert. */ userEmails?: string[]; } /** * The objective configuration for model monitoring, including the information * needed to detect anomalies for one particular model. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfig { /** * The config for integrating with Vertex Explainable AI. */ explanationConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfig; /** * The config for drift of prediction data. */ predictionDriftDetectionConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig; /** * Training dataset for models. This field has to be set only if * TrainingPredictionSkewDetectionConfig is specified. */ trainingDataset?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingDataset; /** * The config for skew between training data and prediction data. */ trainingPredictionSkewDetectionConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig; } /** * The config for integrating with Vertex Explainable AI. Only applicable if * the Model has explanation_spec populated. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfig { /** * If want to analyze the Vertex Explainable AI feature attribute scores or * not. If set to true, Vertex AI will log the feature attributions from * explain response and do the skew/drift detection for them. */ enableFeatureAttributes?: boolean; /** * Predictions generated by the BatchPredictionJob using baseline dataset. */ explanationBaseline?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline; } /** * Output from BatchPredictionJob for Model Monitoring baseline dataset, which * can be used to generate baseline attribution scores. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline { /** * BigQuery location for BatchExplain output. */ bigquery?: GoogleCloudAiplatformV1BigQueryDestination; /** * Cloud Storage location for BatchExplain output. */ gcs?: GoogleCloudAiplatformV1GcsDestination; /** * The storage format of the predictions generated BatchPrediction job. */ predictionFormat?: | "PREDICTION_FORMAT_UNSPECIFIED" | "JSONL" | "BIGQUERY"; } /** * The config for Prediction data drift detection. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig { /** * Key is the feature name and value is the threshold. The threshold here is * against attribution score distance between different time windows. */ attributionScoreDriftThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; /** * Drift anomaly detection threshold used by all features. When the * per-feature thresholds are not set, this field can be used to specify a * threshold for all features. 
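 *
 * A minimal illustrative sketch of a drift detection config that combines
 * this default with a per-feature override; the feature name and threshold
 * values are hypothetical:
 *
 * ```ts
 * const driftConfig: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig = {
 *   defaultDriftThreshold: { value: 0.3 },
 *   driftThresholds: { "age": { value: 0.2 } },
 * };
 * ```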
*/ defaultDriftThreshold?: GoogleCloudAiplatformV1ThresholdConfig; /** * Key is the feature name and value is the threshold. If a feature needs to * be monitored for drift, a value threshold must be configured for that * feature. The threshold here is against feature distribution distance * between different time windows. */ driftThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; } /** * Training Dataset information. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingDataset { /** * The BigQuery table of the unmanaged Dataset used to train this Model. */ bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * Data format of the dataset, only applicable if the input is from Google * Cloud Storage. The possible formats are: "tf-record" The source file is a * TFRecord file. "csv" The source file is a CSV file. "jsonl" The source file * is a JSONL file. */ dataFormat?: string; /** * The resource name of the Dataset used to train this Model. */ dataset?: string; /** * The Google Cloud Storage uri of the unmanaged Dataset used to train this * Model. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Strategy to sample data from Training Dataset. If not set, we process the * whole dataset. */ loggingSamplingStrategy?: GoogleCloudAiplatformV1SamplingStrategy; /** * The target field name the model is to predict. This field will be excluded * when doing Predict and (or) Explain for the training data. */ targetField?: string; } /** * The config for Training & Prediction data skew detection. It specifies the * training dataset sources and the skew detection parameters. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig { /** * Key is the feature name and value is the threshold. The threshold here is * against attribution score distance between the training and prediction * feature. */ attributionScoreSkewThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; /** * Skew anomaly detection threshold used by all features. When the * per-feature thresholds are not set, this field can be used to specify a * threshold for all features. */ defaultSkewThreshold?: GoogleCloudAiplatformV1ThresholdConfig; /** * Key is the feature name and value is the threshold. If a feature needs to * be monitored for skew, a value threshold must be configured for that * feature. The threshold here is against feature distribution distance * between the training and prediction feature. */ skewThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; } /** * Statistics and anomalies generated by Model Monitoring. */ export interface GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies { /** * Number of anomalies within all stats. */ anomalyCount?: number; /** * Deployed Model ID. */ deployedModelId?: string; /** * A list of historical Stats and Anomalies generated for all Features. */ featureStats?: GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies[]; /** * The Model Monitoring Objective these stats and anomalies belong to. */ objective?: | "MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED" | "RAW_FEATURE_SKEW" | "RAW_FEATURE_DRIFT" | "FEATURE_ATTRIBUTION_SKEW" | "FEATURE_ATTRIBUTION_DRIFT"; } function serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies { return { ...data, featureStats: data["featureStats"] !== undefined ?
data["featureStats"].map((item: any) => (serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies { return { ...data, featureStats: data["featureStats"] !== undefined ? data["featureStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(item))) : undefined, }; } /** * Historical Stats (and Anomalies) for a specific Feature. */ export interface GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { /** * Display Name of the Feature. */ featureDisplayName?: string; /** * A list of historical stats generated by different time window's Prediction * Dataset. */ predictionStats?: GoogleCloudAiplatformV1FeatureStatsAnomaly[]; /** * Threshold for anomaly detection. */ threshold?: GoogleCloudAiplatformV1ThresholdConfig; /** * Stats calculated for the Training Dataset. */ trainingStats?: GoogleCloudAiplatformV1FeatureStatsAnomaly; } function serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { return { ...data, predictionStats: data["predictionStats"] !== undefined ? data["predictionStats"].map((item: any) => (serializeGoogleCloudAiplatformV1FeatureStatsAnomaly(item))) : undefined, trainingStats: data["trainingStats"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data["trainingStats"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { return { ...data, predictionStats: data["predictionStats"] !== undefined ? data["predictionStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1FeatureStatsAnomaly(item))) : undefined, trainingStats: data["trainingStats"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data["trainingStats"]) : undefined, }; } /** * Contains information about the original Model if this Model is a copy. */ export interface GoogleCloudAiplatformV1ModelOriginalModelInfo { /** * Output only. The resource name of the Model this Model is a copy of, * including the revision. Format: * `projects/{project}/locations/{location}/models/{model_id}@{version_id}` */ readonly model?: string; } /** * Detail description of the source information of the model. */ export interface GoogleCloudAiplatformV1ModelSourceInfo { /** * If this Model is copy of another Model. If true then source_type pertains * to the original. */ copy?: boolean; /** * Type of the model source. */ sourceType?: | "MODEL_SOURCE_TYPE_UNSPECIFIED" | "AUTOML" | "CUSTOM" | "BQML" | "MODEL_GARDEN" | "GENIE" | "CUSTOM_TEXT_EMBEDDING" | "MARKETPLACE"; } /** * Runtime operation information for IndexEndpointService.MutateDeployedIndex. */ export interface GoogleCloudAiplatformV1MutateDeployedIndexOperationMetadata { /** * The unique index id specified by user */ deployedIndexId?: string; /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Response message for IndexEndpointService.MutateDeployedIndex. */ export interface GoogleCloudAiplatformV1MutateDeployedIndexResponse { /** * The DeployedIndex that had been updated in the IndexEndpoint. 
*/ deployedIndex?: GoogleCloudAiplatformV1DeployedIndex; } /** * Runtime operation information for EndpointService.MutateDeployedModel. */ export interface GoogleCloudAiplatformV1MutateDeployedModelOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for EndpointService.MutateDeployedModel. */ export interface GoogleCloudAiplatformV1MutateDeployedModelRequest { /** * Required. The DeployedModel to be mutated within the Endpoint. Only the * following fields can be mutated: * `min_replica_count` in either * DedicatedResources or AutomaticResources * `max_replica_count` in either * DedicatedResources or AutomaticResources * autoscaling_metric_specs * * `disable_container_logging` (v1 only) * `enable_container_logging` (v1beta1 * only) */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; /** * Required. The update mask applies to the resource. See * google.protobuf.FieldMask. */ updateMask?: string /* FieldMask */; } function serializeGoogleCloudAiplatformV1MutateDeployedModelRequest(data: any): GoogleCloudAiplatformV1MutateDeployedModelRequest { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeGoogleCloudAiplatformV1MutateDeployedModelRequest(data: any): GoogleCloudAiplatformV1MutateDeployedModelRequest { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Response message for EndpointService.MutateDeployedModel. */ export interface GoogleCloudAiplatformV1MutateDeployedModelResponse { /** * The DeployedModel that's being mutated. */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; } /** * Represents a Neural Architecture Search (NAS) job. */ export interface GoogleCloudAiplatformV1NasJob { /** * Output only. Time when the NasJob was created. */ readonly createTime?: Date; /** * Required. The display name of the NasJob. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Enable a separation of Custom model training and restricted * image training for tenant project. */ enableRestrictedImageTraining?: boolean; /** * Customer-managed encryption key options for a NasJob. If this is set, then * all resources created by the NasJob will be encrypted with the provided * encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the NasJob entered any of the following states: * `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is JOB_STATE_FAILED or * JOB_STATE_CANCELLED. */ readonly error?: GoogleRpcStatus; /** * The labels with user-defined metadata to organize NasJobs. Label keys and * values can be no longer than 64 characters (Unicode codepoints), can only * contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the NasJob. */ readonly name?: string; /** * Output only. Output of the NasJob. */ readonly nasJobOutput?: GoogleCloudAiplatformV1NasJobOutput; /** * Required. The specification of a NasJob. */ nasJobSpec?: GoogleCloudAiplatformV1NasJobSpec; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. 
Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the NasJob for the first time entered the * `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Output only. Time when the NasJob was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1NasJob(data: any): GoogleCloudAiplatformV1NasJob { return { ...data, nasJobSpec: data["nasJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpec(data["nasJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJob(data: any): GoogleCloudAiplatformV1NasJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, nasJobSpec: data["nasJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpec(data["nasJobSpec"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Represents a uCAIP NasJob output. */ export interface GoogleCloudAiplatformV1NasJobOutput { /** * Output only. The output of this multi-trial Neural Architecture Search * (NAS) job. */ readonly multiTrialJobOutput?: GoogleCloudAiplatformV1NasJobOutputMultiTrialJobOutput; } /** * The output of a multi-trial Neural Architecture Search (NAS) jobs. */ export interface GoogleCloudAiplatformV1NasJobOutputMultiTrialJobOutput { /** * Output only. List of NasTrials that were started as part of search stage. */ readonly searchTrials?: GoogleCloudAiplatformV1NasTrial[]; /** * Output only. List of NasTrials that were started as part of train stage. */ readonly trainTrials?: GoogleCloudAiplatformV1NasTrial[]; } /** * Represents the spec of a NasJob. */ export interface GoogleCloudAiplatformV1NasJobSpec { /** * The spec of multi-trial algorithms. */ multiTrialAlgorithmSpec?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec; /** * The ID of the existing NasJob in the same Project and Location which will * be used to resume search. search_space_spec and nas_algorithm_spec are * obtained from previous NasJob hence should not provide them again for this * NasJob. */ resumeNasJobId?: string; /** * It defines the search space for Neural Architecture Search (NAS). */ searchSpaceSpec?: string; } function serializeGoogleCloudAiplatformV1NasJobSpec(data: any): GoogleCloudAiplatformV1NasJobSpec { return { ...data, multiTrialAlgorithmSpec: data["multiTrialAlgorithmSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data["multiTrialAlgorithmSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpec(data: any): GoogleCloudAiplatformV1NasJobSpec { return { ...data, multiTrialAlgorithmSpec: data["multiTrialAlgorithmSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data["multiTrialAlgorithmSpec"]) : undefined, }; } /** * The spec of multi-trial Neural Architecture Search (NAS). 
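 *
 * A minimal illustrative sketch; the metric id and trial counts are
 * hypothetical, and the required searchTrialJobSpec (a
 * GoogleCloudAiplatformV1CustomJobSpec) is omitted for brevity:
 *
 * ```ts
 * const algorithmSpec: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec = {
 *   multiTrialAlgorithm: "REINFORCEMENT_LEARNING",
 *   metric: { goal: "MAXIMIZE", metricId: "top1_accuracy" },
 *   searchTrialSpec: { maxTrialCount: 100, maxParallelTrialCount: 5 },
 * };
 * ```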
*/ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec { /** * Metric specs for the NAS job. Validation for this field is done at * `multi_trial_algorithm_spec` field. */ metric?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecMetricSpec; /** * The multi-trial Neural Architecture Search (NAS) algorithm type. Defaults * to `REINFORCEMENT_LEARNING`. */ multiTrialAlgorithm?: | "MULTI_TRIAL_ALGORITHM_UNSPECIFIED" | "REINFORCEMENT_LEARNING" | "GRID_SEARCH"; /** * Required. Spec for search trials. */ searchTrialSpec?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec; /** * Spec for train trials. Top N [TrainTrialSpec.max_parallel_trial_count] * search trials will be trained for every M [TrainTrialSpec.frequency] trials * searched. */ trainTrialSpec?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec; } function serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec { return { ...data, searchTrialSpec: data["searchTrialSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data["searchTrialSpec"]) : undefined, trainTrialSpec: data["trainTrialSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data["trainTrialSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec { return { ...data, searchTrialSpec: data["searchTrialSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data["searchTrialSpec"]) : undefined, trainTrialSpec: data["trainTrialSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data["trainTrialSpec"]) : undefined, }; } /** * Represents a metric to optimize. */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecMetricSpec { /** * Required. The optimization goal of the metric. */ goal?: | "GOAL_TYPE_UNSPECIFIED" | "MAXIMIZE" | "MINIMIZE"; /** * Required. The ID of the metric. Must not contain whitespaces. */ metricId?: string; } /** * Represent spec for search trials. */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { /** * The number of failed trials that need to be seen before failing the * NasJob. If set to 0, Vertex AI decides how many trials must fail before the * whole job fails. */ maxFailedTrialCount?: number; /** * Required. The maximum number of trials to run in parallel. */ maxParallelTrialCount?: number; /** * Required. The maximum number of Neural Architecture Search (NAS) trials to * run. */ maxTrialCount?: number; /** * Required. The spec of a search trial job. The same spec applies to all * search trials. */ searchTrialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; } function serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { return { ...data, searchTrialJobSpec: data["searchTrialJobSpec"] !== undefined ? 
serializeGoogleCloudAiplatformV1CustomJobSpec(data["searchTrialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { return { ...data, searchTrialJobSpec: data["searchTrialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["searchTrialJobSpec"]) : undefined, }; } /** * Represent spec for train trials. */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { /** * Required. Frequency of search trials to start train stage. Top N * [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for * every M [TrainTrialSpec.frequency] trials searched. */ frequency?: number; /** * Required. The maximum number of trials to run in parallel. */ maxParallelTrialCount?: number; /** * Required. The spec of a train trial job. The same spec applies to all * train trials. */ trainTrialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; } function serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { return { ...data, trainTrialJobSpec: data["trainTrialJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["trainTrialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { return { ...data, trainTrialJobSpec: data["trainTrialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["trainTrialJobSpec"]) : undefined, }; } /** * Represents a uCAIP NasJob trial. */ export interface GoogleCloudAiplatformV1NasTrial { /** * Output only. Time when the NasTrial's status changed to `SUCCEEDED` or * `INFEASIBLE`. */ readonly endTime?: Date; /** * Output only. The final measurement containing the objective value. */ readonly finalMeasurement?: GoogleCloudAiplatformV1Measurement; /** * Output only. The identifier of the NasTrial assigned by the service. */ readonly id?: string; /** * Output only. Time when the NasTrial was started. */ readonly startTime?: Date; /** * Output only. The detailed state of the NasTrial. */ readonly state?: | "STATE_UNSPECIFIED" | "REQUESTED" | "ACTIVE" | "STOPPING" | "SUCCEEDED" | "INFEASIBLE"; } /** * Represents a NasTrial details along with its parameters. If there is a * corresponding train NasTrial, the train NasTrial is also returned. */ export interface GoogleCloudAiplatformV1NasTrialDetail { /** * Output only. Resource name of the NasTrialDetail. */ readonly name?: string; /** * The parameters for the NasJob NasTrial. */ parameters?: string; /** * The requested search NasTrial. */ searchTrial?: GoogleCloudAiplatformV1NasTrial; /** * The train NasTrial corresponding to search_trial. Only populated if * search_trial is used for training. */ trainTrial?: GoogleCloudAiplatformV1NasTrial; } /** * A query to find a number of similar entities. */ export interface GoogleCloudAiplatformV1NearestNeighborQuery { /** * Optional. The embedding vector that be used for similar search. */ embedding?: GoogleCloudAiplatformV1NearestNeighborQueryEmbedding; /** * Optional. The entity id whose similar entities should be searched for. If * embedding is set, search will use embedding instead of entity_id. */ entityId?: string; /** * Optional. 
The number of similar entities to be retrieved from the feature view * for each query. */ neighborCount?: number; /** * Optional. The list of numeric filters. */ numericFilters?: GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter[]; /** * Optional. Parameters that can be set to tune the query on the fly. */ parameters?: GoogleCloudAiplatformV1NearestNeighborQueryParameters; /** * Optional. Crowding is a constraint on a neighbor list produced by nearest * neighbor search requiring that no more than * per_crowding_attribute_neighbor_count of the k neighbors returned have the * same value of crowding_attribute. It's used for improving result diversity. */ perCrowdingAttributeNeighborCount?: number; /** * Optional. The list of string filters. */ stringFilters?: GoogleCloudAiplatformV1NearestNeighborQueryStringFilter[]; } function serializeGoogleCloudAiplatformV1NearestNeighborQuery(data: any): GoogleCloudAiplatformV1NearestNeighborQuery { return { ...data, numericFilters: data["numericFilters"] !== undefined ? data["numericFilters"].map((item: any) => (serializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborQuery(data: any): GoogleCloudAiplatformV1NearestNeighborQuery { return { ...data, numericFilters: data["numericFilters"] !== undefined ? data["numericFilters"].map((item: any) => (deserializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(item))) : undefined, }; } /** * The embedding vector. */ export interface GoogleCloudAiplatformV1NearestNeighborQueryEmbedding { /** * Optional. Individual value in the embedding. */ value?: number[]; } /** * Numeric filter is used to search a subset of the entities by using boolean * rules on numeric columns. For example: Database Point 0: {name: "a" * value_int: 42} {name: "b" value_float: 1.0} Database Point 1: {name: "a" * value_int: 10} {name: "b" value_float: 2.0} Database Point 2: {name: "a" * value_int: -1} {name: "b" value_float: 3.0} Query: {name: "a" value_int: 12 * operator: LESS} // Matches Point 1, 2 {name: "b" value_float: 2.0 operator: * EQUAL} // Matches Point 1 */ export interface GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter { /** * Required. Column name in BigQuery that is used as a filter. */ name?: string; /** * Optional. This MUST be specified for queries and must NOT be specified for * database points. */ op?: | "OPERATOR_UNSPECIFIED" | "LESS" | "LESS_EQUAL" | "EQUAL" | "GREATER_EQUAL" | "GREATER" | "NOT_EQUAL"; /** * double value type. */ valueDouble?: number; /** * float value type. */ valueFloat?: number; /** * int value type. */ valueInt?: bigint; } function serializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(data: any): GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter { return { ...data, valueInt: data["valueInt"] !== undefined ? String(data["valueInt"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(data: any): GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter { return { ...data, valueInt: data["valueInt"] !== undefined ? BigInt(data["valueInt"]) : undefined, }; } /** * Parameters that can be overridden in each query to tune query latency and * recall. */ export interface GoogleCloudAiplatformV1NearestNeighborQueryParameters { /** * Optional. The number of neighbors to find via approximate search before * exact reordering is performed; if set, this value must be > neighbor_count. */ approximateNeighborCandidates?: number; /** * Optional.
The fraction of the number of leaves to search; setting it at query * time allows the user to tune search performance. Increasing this value * increases both search accuracy and latency. The value should be between 0.0 * and 1.0. */ leafNodesSearchFraction?: number; } /** * String filter is used to search a subset of the entities by using boolean * rules on string columns. For example: if a query specifies string filter with * 'name = color, allow_tokens = {red, blue}, deny_tokens = {purple}', then * that query will match entities that are red or blue, but if those points are * also purple, then they will be excluded even if they are red/blue. Only * string filters are supported for now; numeric filters will be supported in the * near future. */ export interface GoogleCloudAiplatformV1NearestNeighborQueryStringFilter { /** * Optional. The allowed tokens. */ allowTokens?: string[]; /** * Optional. The denied tokens. */ denyTokens?: string[]; /** * Required. Column name in BigQuery that is used as a filter. */ name?: string; } /** * Nearest neighbors for one query. */ export interface GoogleCloudAiplatformV1NearestNeighbors { /** * All its neighbors. */ neighbors?: GoogleCloudAiplatformV1NearestNeighborsNeighbor[]; } function serializeGoogleCloudAiplatformV1NearestNeighbors(data: any): GoogleCloudAiplatformV1NearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (serializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighbors(data: any): GoogleCloudAiplatformV1NearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (deserializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(item))) : undefined, }; } /** * Runtime operation metadata with regard to Matching Engine Index. */ export interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata { /** * The validation stats of the content (per file) to be inserted or updated * on the Matching Engine Index resource. Populated if contentsDeltaUri is * provided as part of Index.metadata. Please note that, currently, for those * files that are broken or have an unsupported file format, we will not have the * stats for those files. */ contentValidationStats?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats[]; /** * The ingested data size in bytes. */ dataBytesCount?: bigint; } function serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata { return { ...data, contentValidationStats: data["contentValidationStats"] !== undefined ? data["contentValidationStats"].map((item: any) => (serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(item))) : undefined, dataBytesCount: data["dataBytesCount"] !== undefined ? String(data["dataBytesCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata { return { ...data, contentValidationStats: data["contentValidationStats"] !== undefined ? data["contentValidationStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(item))) : undefined, dataBytesCount: data["dataBytesCount"] !== undefined ?
BigInt(data["dataBytesCount"]) : undefined, }; } export interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats { /** * Number of records in this file we skipped due to validate errors. */ invalidRecordCount?: bigint; /** * Number of sparse records in this file we skipped due to validate errors. */ invalidSparseRecordCount?: bigint; /** * The detail information of the partial failures encountered for those * invalid records that couldn't be parsed. Up to 50 partial errors will be * reported. */ partialErrors?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError[]; /** * Cloud Storage URI pointing to the original file in user's bucket. */ sourceGcsUri?: string; /** * Number of records in this file that were successfully processed. */ validRecordCount?: bigint; /** * Number of sparse records in this file that were successfully processed. */ validSparseRecordCount?: bigint; } function serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats { return { ...data, invalidRecordCount: data["invalidRecordCount"] !== undefined ? String(data["invalidRecordCount"]) : undefined, invalidSparseRecordCount: data["invalidSparseRecordCount"] !== undefined ? String(data["invalidSparseRecordCount"]) : undefined, validRecordCount: data["validRecordCount"] !== undefined ? String(data["validRecordCount"]) : undefined, validSparseRecordCount: data["validSparseRecordCount"] !== undefined ? String(data["validSparseRecordCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats { return { ...data, invalidRecordCount: data["invalidRecordCount"] !== undefined ? BigInt(data["invalidRecordCount"]) : undefined, invalidSparseRecordCount: data["invalidSparseRecordCount"] !== undefined ? BigInt(data["invalidSparseRecordCount"]) : undefined, validRecordCount: data["validRecordCount"] !== undefined ? BigInt(data["validRecordCount"]) : undefined, validSparseRecordCount: data["validSparseRecordCount"] !== undefined ? BigInt(data["validSparseRecordCount"]) : undefined, }; } export interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError { /** * Empty if the embedding id is failed to parse. */ embeddingId?: string; /** * A human-readable message that is shown to the user to help them fix the * error. Note that this message may change from time to time, your code * should check against error_type as the source of truth. */ errorMessage?: string; /** * The error type of this record. */ errorType?: | "ERROR_TYPE_UNSPECIFIED" | "EMPTY_LINE" | "INVALID_JSON_SYNTAX" | "INVALID_CSV_SYNTAX" | "INVALID_AVRO_SYNTAX" | "INVALID_EMBEDDING_ID" | "EMBEDDING_SIZE_MISMATCH" | "NAMESPACE_MISSING" | "PARSING_ERROR" | "DUPLICATE_NAMESPACE" | "OP_IN_DATAPOINT" | "MULTIPLE_VALUES" | "INVALID_NUMERIC_VALUE" | "INVALID_ENCODING" | "INVALID_SPARSE_DIMENSIONS" | "INVALID_TOKEN_VALUE" | "INVALID_SPARSE_EMBEDDING" | "INVALID_EMBEDDING"; /** * The original content of this record. */ rawRecord?: string; /** * Cloud Storage URI pointing to the original file in user's bucket. */ sourceGcsUri?: string; } /** * A neighbor of the query vector. */ export interface GoogleCloudAiplatformV1NearestNeighborsNeighbor { /** * The distance between the neighbor and the query vector. 
*/ distance?: number; /** * The id of the similar entity. */ entityId?: string; /** * The attributes of the neighbor, e.g. filters, crowding and metadata Note * that full entities are returned only when "return_full_entity" is set to * true. Otherwise, only the "entity_id" and "distance" fields are populated. */ entityKeyValues?: GoogleCloudAiplatformV1FetchFeatureValuesResponse; } function serializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(data: any): GoogleCloudAiplatformV1NearestNeighborsNeighbor { return { ...data, entityKeyValues: data["entityKeyValues"] !== undefined ? serializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data["entityKeyValues"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(data: any): GoogleCloudAiplatformV1NearestNeighborsNeighbor { return { ...data, entityKeyValues: data["entityKeyValues"] !== undefined ? deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data["entityKeyValues"]) : undefined, }; } /** * Neighbors for example-based explanations. */ export interface GoogleCloudAiplatformV1Neighbor { /** * Output only. The neighbor distance. */ readonly neighborDistance?: number; /** * Output only. The neighbor id. */ readonly neighborId?: string; } /** * Network spec. */ export interface GoogleCloudAiplatformV1NetworkSpec { /** * Whether to enable public internet access. Default false. */ enableInternetAccess?: boolean; /** * The full name of the Google Compute Engine * [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) */ network?: string; /** * The name of the subnet that this instance is in. Format: * `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` */ subnetwork?: string; } /** * Represents a mount configuration for Network File System (NFS) to mount. */ export interface GoogleCloudAiplatformV1NfsMount { /** * Required. Destination mount path. The NFS will be mounted for the user * under /mnt/nfs/ */ mountPoint?: string; /** * Required. Source path exported from NFS server. Has to start with '/', and * combined with the ip address, it indicates the source mount path in the * form of `server:path` */ path?: string; /** * Required. IP address of the NFS server. */ server?: string; } /** * The euc configuration of NotebookRuntimeTemplate. */ export interface GoogleCloudAiplatformV1NotebookEucConfig { /** * Output only. Whether ActAs check is bypassed for service account attached * to the VM. If false, we need ActAs check for the default Compute Engine * Service account. When a Runtime is created, a VM is allocated using Default * Compute Engine Service Account. Any user requesting to use this Runtime * requires Service Account User (ActAs) permission over this SA. If true, * Runtime owner is using EUC and does not require the above permission as VM * no longer use default Compute Engine SA, but a P4SA. */ readonly bypassActasCheck?: boolean; /** * Input only. Whether EUC is disabled in this NotebookRuntimeTemplate. In * proto3, the default value of a boolean is false. In this way, by default * EUC will be enabled for NotebookRuntimeTemplate. */ eucDisabled?: boolean; } /** * NotebookExecutionJob represents an instance of a notebook execution. */ export interface GoogleCloudAiplatformV1NotebookExecutionJob { /** * Output only. Timestamp when this NotebookExecutionJob was created. */ readonly createTime?: Date; /** * The custom compute configuration for an execution job. 
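 *
 * A minimal illustrative sketch of this field's value; the machine type is
 * hypothetical and the persistentDiskSpec is omitted:
 *
 * ```ts
 * const envSpec: GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec = {
 *   machineSpec: { machineType: "e2-standard-4" },
 *   networkSpec: { enableInternetAccess: true },
 * };
 * ```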
*/ customEnvironmentSpec?: GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec; /** * The Dataform Repository pointing to a single file notebook repository. */ dataformRepositorySource?: GoogleCloudAiplatformV1NotebookExecutionJobDataformRepositorySource; /** * The contents of an input notebook file. */ directNotebookSource?: GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource; /** * The display name of the NotebookExecutionJob. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for the notebook execution job. This * field is auto-populated if the NotebookRuntimeTemplate has an encryption * spec. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Max running time of the execution job in seconds (default 86400s / 24 * hrs). */ executionTimeout?: number /* Duration */; /** * The user email to run the execution as. Only supported by Colab runtimes. */ executionUser?: string; /** * The Cloud Storage url pointing to the ipynb file. Format: * `gs://bucket/notebook_file.ipynb` */ gcsNotebookSource?: GoogleCloudAiplatformV1NotebookExecutionJobGcsNotebookSource; /** * The Cloud Storage location to upload the result to. Format: * `gs://bucket-name` */ gcsOutputUri?: string; /** * Output only. The state of the NotebookExecutionJob. */ readonly jobState?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * The name of the kernel to use during notebook execution. If unset, the * default kernel is used. */ kernelName?: string; /** * The labels with user-defined metadata to organize NotebookExecutionJobs. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. */ labels?: { [key: string]: string }; /** * Output only. The resource name of this NotebookExecutionJob. Format: * `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` */ readonly name?: string; /** * The NotebookRuntimeTemplate to source compute configuration from. */ notebookRuntimeTemplateResourceName?: string; /** * Output only. The Schedule resource name if this job is triggered by one. * Format: * `projects/{project_id}/locations/{location}/schedules/{schedule_id}` */ readonly scheduleResourceName?: string; /** * The service account to run the execution as. */ serviceAccount?: string; /** * Output only. Populated when the NotebookExecutionJob is completed. When * there is an error during notebook execution, the error details are * populated. */ readonly status?: GoogleRpcStatus; /** * Output only. Timestamp when this NotebookExecutionJob was most recently * updated. */ readonly updateTime?: Date; /** * The Workbench runtime configuration to use for the notebook execution. 
*/ workbenchRuntime?: GoogleCloudAiplatformV1NotebookExecutionJobWorkbenchRuntime; } function serializeGoogleCloudAiplatformV1NotebookExecutionJob(data: any): GoogleCloudAiplatformV1NotebookExecutionJob { return { ...data, customEnvironmentSpec: data["customEnvironmentSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data["customEnvironmentSpec"]) : undefined, directNotebookSource: data["directNotebookSource"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data["directNotebookSource"]) : undefined, executionTimeout: data["executionTimeout"] !== undefined ? data["executionTimeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookExecutionJob(data: any): GoogleCloudAiplatformV1NotebookExecutionJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, customEnvironmentSpec: data["customEnvironmentSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data["customEnvironmentSpec"]) : undefined, directNotebookSource: data["directNotebookSource"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data["directNotebookSource"]) : undefined, executionTimeout: data["executionTimeout"] !== undefined ? data["executionTimeout"] : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Compute configuration to use for an execution job. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec { /** * The specification of a single machine for the execution job. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * The network configuration to use for the execution job. */ networkSpec?: GoogleCloudAiplatformV1NetworkSpec; /** * The specification of a persistent disk to attach for the execution job. */ persistentDiskSpec?: GoogleCloudAiplatformV1PersistentDiskSpec; } function serializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data: any): GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec { return { ...data, persistentDiskSpec: data["persistentDiskSpec"] !== undefined ? serializeGoogleCloudAiplatformV1PersistentDiskSpec(data["persistentDiskSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data: any): GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec { return { ...data, persistentDiskSpec: data["persistentDiskSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1PersistentDiskSpec(data["persistentDiskSpec"]) : undefined, }; } /** * The Dataform Repository containing the input notebook. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobDataformRepositorySource { /** * The commit SHA to read repository with. If unset, the file will be read at * HEAD. */ commitSha?: string; /** * The resource name of the Dataform Repository. Format: * `projects/{project_id}/locations/{location}/repositories/{repository_id}` */ dataformRepositoryResourceName?: string; } /** * The content of the input notebook in ipynb format. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource { /** * The base64-encoded contents of the input notebook file. 
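 *
 * A minimal illustrative sketch of populating this field from an in-memory
 * notebook (the notebook JSON here is a placeholder); the serializer for this
 * type base64-encodes the bytes before they are sent:
 *
 * ```ts
 * const source: GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource = {
 *   content: new TextEncoder().encode(JSON.stringify({ cells: [] })),
 * };
 * ```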
*/ content?: Uint8Array; } function serializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data: any): GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource { return { ...data, content: data["content"] !== undefined ? encodeBase64(data["content"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data: any): GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource { return { ...data, content: data["content"] !== undefined ? decodeBase64(data["content"] as string) : undefined, }; } /** * The Cloud Storage uri for the input notebook. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobGcsNotebookSource { /** * The version of the Cloud Storage object to read. If unset, the current * version of the object is read. See * https://cloud.google.com/storage/docs/metadata#generation-number. */ generation?: string; /** * The Cloud Storage uri pointing to the ipynb file. Format: * `gs://bucket/notebook_file.ipynb` */ uri?: string; } /** * Configuration for a Workbench Instances-based environment. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobWorkbenchRuntime { } /** * The idle shutdown configuration of NotebookRuntimeTemplate, which contains * the idle_timeout as a required field. */ export interface GoogleCloudAiplatformV1NotebookIdleShutdownConfig { /** * Whether Idle Shutdown is disabled in this NotebookRuntimeTemplate. */ idleShutdownDisabled?: boolean; /** * Required. Duration is accurate to the second. In Notebook, Idle Timeout is * accurate to the minute, so the range of idle_timeout (seconds) is: 10 * 60 ~ * 1440 * 60. */ idleTimeout?: number /* Duration */; } function serializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data: any): GoogleCloudAiplatformV1NotebookIdleShutdownConfig { return { ...data, idleTimeout: data["idleTimeout"] !== undefined ? data["idleTimeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data: any): GoogleCloudAiplatformV1NotebookIdleShutdownConfig { return { ...data, idleTimeout: data["idleTimeout"] !== undefined ? data["idleTimeout"] : undefined, }; } /** * A runtime is a virtual machine allocated to a particular user for a * particular Notebook file on a temporary basis, with a lifetime limited to 24 * hours. */ export interface GoogleCloudAiplatformV1NotebookRuntime { /** * Output only. Timestamp when this NotebookRuntime was created. */ readonly createTime?: Date; /** * The description of the NotebookRuntime. */ description?: string; /** * Required. The display name of the NotebookRuntime. The name can be up to * 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Output only. Customer-managed encryption key spec for the notebook * runtime. */ readonly encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Timestamp when this NotebookRuntime will expire: 1. * System Predefined NotebookRuntime: 24 hours after creation. After * expiration, the system predefined runtime will be deleted. 2. User created * NotebookRuntime: 6 months after last upgrade. After expiration, the user * created runtime will be stopped and allowed to be upgraded. */ readonly expirationTime?: Date; /** * Output only. The health state of the NotebookRuntime. */ readonly healthState?: | "HEALTH_STATE_UNSPECIFIED" | "HEALTHY" | "UNHEALTHY"; /** * Output only. The idle shutdown configuration of the notebook runtime.
*/ readonly idleShutdownConfig?: GoogleCloudAiplatformV1NotebookIdleShutdownConfig; /** * Output only. Whether NotebookRuntime is upgradable. */ readonly isUpgradable?: boolean; /** * The labels with user-defined metadata to organize your NotebookRuntime. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. No more than * 64 user labels can be associated with one NotebookRuntime (System labels * are excluded). See https://goo.gl/xmQnxf for more information and examples * of labels. System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. The following system labels * exist for NotebookRuntime: * * "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": output only, * its value is the Compute Engine instance id. * * "aiplatform.googleapis.com/colab_enterprise_entry_service": its value is * either "bigquery" or "vertex"; if absent, it should be "vertex". This is to * describe the entry service, either BigQuery or Vertex. */ labels?: { [key: string]: string }; /** * Output only. The resource name of the NotebookRuntime. */ readonly name?: string; /** * Optional. The Compute Engine tags to add to runtime (see [Tagging * instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). */ networkTags?: string[]; /** * Output only. The pointer to NotebookRuntimeTemplate this NotebookRuntime * is created from. */ readonly notebookRuntimeTemplateRef?: GoogleCloudAiplatformV1NotebookRuntimeTemplateRef; /** * Output only. The type of the notebook runtime. */ readonly notebookRuntimeType?: | "NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED" | "USER_DEFINED" | "ONE_CLICK"; /** * Output only. The proxy endpoint used to access the NotebookRuntime. */ readonly proxyUri?: string; /** * Output only. The runtime (instance) state of the NotebookRuntime. */ readonly runtimeState?: | "RUNTIME_STATE_UNSPECIFIED" | "RUNNING" | "BEING_STARTED" | "BEING_STOPPED" | "STOPPED" | "BEING_UPGRADED" | "ERROR" | "INVALID"; /** * Required. The user email of the NotebookRuntime. */ runtimeUser?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. The service account that the NotebookRuntime workload runs * as. */ readonly serviceAccount?: string; /** * Output only. Timestamp when this NotebookRuntime was most recently * updated. */ readonly updateTime?: Date; /** * Output only. The VM OS image version of NotebookRuntime. */ readonly version?: string; } /** * A template that specifies runtime configurations such as machine type, * runtime version, network configurations, etc. Multiple runtimes can be * created from a runtime template. */ export interface GoogleCloudAiplatformV1NotebookRuntimeTemplate { /** * Output only. Timestamp when this NotebookRuntimeTemplate was created. */ readonly createTime?: Date; /** * Optional. The specification of persistent disk attached to the runtime as * data disk storage. */ dataPersistentDiskSpec?: GoogleCloudAiplatformV1PersistentDiskSpec; /** * The description of the NotebookRuntimeTemplate. */ description?: string; /** * Required. The display name of the NotebookRuntimeTemplate. The name can be * up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for the notebook runtime.
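 * For illustration, a CMEK setting on a template could look like the
 * following (this assumes the `kmsKeyName` field of
 * GoogleCloudAiplatformV1EncryptionSpec; the display name and key name are
 * placeholders):
 *
 *     const template: GoogleCloudAiplatformV1NotebookRuntimeTemplate = {
 *       displayName: "cmek-template",
 *       encryptionSpec: {
 *         kmsKeyName: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
 *       },
 *     };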
*/ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * EUC configuration of the NotebookRuntimeTemplate. */ eucConfig?: GoogleCloudAiplatformV1NotebookEucConfig; /** * The idle shutdown configuration of NotebookRuntimeTemplate. This config * will only be set when idle shutdown is enabled. */ idleShutdownConfig?: GoogleCloudAiplatformV1NotebookIdleShutdownConfig; /** * Output only. The default template to use if not specified. */ readonly isDefault?: boolean; /** * The labels with user-defined metadata to organize the * NotebookRuntimeTemplates. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * Optional. Immutable. The specification of a single machine for the * template. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * The resource name of the NotebookRuntimeTemplate. */ name?: string; /** * Optional. Network spec. */ networkSpec?: GoogleCloudAiplatformV1NetworkSpec; /** * Optional. The Compute Engine tags to add to runtime (see [Tagging * instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). */ networkTags?: string[]; /** * Optional. Immutable. The type of the notebook runtime template. */ notebookRuntimeType?: | "NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED" | "USER_DEFINED" | "ONE_CLICK"; /** * The service account that the runtime workload runs as. You can use any * service account within the same project, but you must have the service * account user permission to use the instance. If not specified, the [Compute * Engine default service * account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) * is used. */ serviceAccount?: string; /** * Optional. Immutable. Runtime Shielded VM spec. */ shieldedVmConfig?: GoogleCloudAiplatformV1ShieldedVmConfig; /** * Output only. Timestamp when this NotebookRuntimeTemplate was most recently * updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data: any): GoogleCloudAiplatformV1NotebookRuntimeTemplate { return { ...data, dataPersistentDiskSpec: data["dataPersistentDiskSpec"] !== undefined ? serializeGoogleCloudAiplatformV1PersistentDiskSpec(data["dataPersistentDiskSpec"]) : undefined, idleShutdownConfig: data["idleShutdownConfig"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data["idleShutdownConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data: any): GoogleCloudAiplatformV1NotebookRuntimeTemplate { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, dataPersistentDiskSpec: data["dataPersistentDiskSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1PersistentDiskSpec(data["dataPersistentDiskSpec"]) : undefined, idleShutdownConfig: data["idleShutdownConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data["idleShutdownConfig"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Points to a NotebookRuntimeTemplateRef. */ export interface GoogleCloudAiplatformV1NotebookRuntimeTemplateRef { /** * Immutable. 
A resource name of the NotebookRuntimeTemplate. */ notebookRuntimeTemplate?: string; } /** * Input for pairwise metric. */ export interface GoogleCloudAiplatformV1PairwiseMetricInput { /** * Required. Pairwise metric instance. */ instance?: GoogleCloudAiplatformV1PairwiseMetricInstance; /** * Required. Spec for pairwise metric. */ metricSpec?: GoogleCloudAiplatformV1PairwiseMetricSpec; } /** * Pairwise metric instance. Usually one instance corresponds to one row in an * evaluation dataset. */ export interface GoogleCloudAiplatformV1PairwiseMetricInstance { /** * Instance specified as a json string. String key-value pairs are expected * in the json_instance to render PairwiseMetricSpec.instance_prompt_template. */ jsonInstance?: string; } /** * Spec for pairwise metric result. */ export interface GoogleCloudAiplatformV1PairwiseMetricResult { /** * Output only. Explanation for pairwise metric score. */ readonly explanation?: string; /** * Output only. Pairwise metric choice. */ readonly pairwiseChoice?: | "PAIRWISE_CHOICE_UNSPECIFIED" | "BASELINE" | "CANDIDATE" | "TIE"; } /** * Spec for pairwise metric. */ export interface GoogleCloudAiplatformV1PairwiseMetricSpec { /** * Required. Metric prompt template for pairwise metric. */ metricPromptTemplate?: string; } /** * Input for pairwise question answering quality metric. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInput { /** * Required. Pairwise question answering quality instance. */ instance?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInstance; /** * Required. Spec for pairwise question answering quality score metric. */ metricSpec?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualitySpec; } /** * Spec for pairwise question answering quality instance. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInstance { /** * Required. Output of the baseline model. */ baselinePrediction?: string; /** * Required. Text to answer the question. */ context?: string; /** * Required. Question Answering prompt for LLM. */ instruction?: string; /** * Required. Output of the candidate model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for pairwise question answering quality result. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityResult { /** * Output only. Confidence for question answering quality score. */ readonly confidence?: number; /** * Output only. Explanation for question answering quality score. */ readonly explanation?: string; /** * Output only. Pairwise question answering prediction choice. */ readonly pairwiseChoice?: | "PAIRWISE_CHOICE_UNSPECIFIED" | "BASELINE" | "CANDIDATE" | "TIE"; } /** * Spec for pairwise question answering quality score metric. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualitySpec { /** * Optional. Whether to use instance.reference to compute question answering * quality. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for pairwise summarization quality metric. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualityInput { /** * Required. Pairwise summarization quality instance. */ instance?: GoogleCloudAiplatformV1PairwiseSummarizationQualityInstance; /** * Required. Spec for pairwise summarization quality score metric. 
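 * For illustration, a complete pairwise summarization quality input could be
 * assembled as below (all strings are placeholders; useReference is left
 * false because no reference summary is supplied):
 *
 *     const input: GoogleCloudAiplatformV1PairwiseSummarizationQualityInput = {
 *       instance: {
 *         instruction: "Summarize the article in two sentences.",
 *         context: "<full article text>",
 *         baselinePrediction: "<summary from the baseline model>",
 *         prediction: "<summary from the candidate model>",
 *       },
 *       metricSpec: { useReference: false },
 *     };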
*/ metricSpec?: GoogleCloudAiplatformV1PairwiseSummarizationQualitySpec; } /** * Spec for pairwise summarization quality instance. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualityInstance { /** * Required. Output of the baseline model. */ baselinePrediction?: string; /** * Required. Text to be summarized. */ context?: string; /** * Required. Summarization prompt for LLM. */ instruction?: string; /** * Required. Output of the candidate model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for pairwise summarization quality result. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualityResult { /** * Output only. Confidence for summarization quality score. */ readonly confidence?: number; /** * Output only. Explanation for summarization quality score. */ readonly explanation?: string; /** * Output only. Pairwise summarization prediction choice. */ readonly pairwiseChoice?: | "PAIRWISE_CHOICE_UNSPECIFIED" | "BASELINE" | "CANDIDATE" | "TIE"; } /** * Spec for pairwise summarization quality score metric. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualitySpec { /** * Optional. Whether to use instance.reference to compute pairwise * summarization quality. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * A datatype containing media that is part of a multi-part `Content` message. * A `Part` consists of data which has an associated datatype. A `Part` can only * contain one of the accepted types in `Part.data`. A `Part` must have a fixed * IANA MIME type identifying the type and subtype of the media if `inline_data` * or `file_data` field is filled with raw bytes. */ export interface GoogleCloudAiplatformV1Part { /** * Optional. URI based data. */ fileData?: GoogleCloudAiplatformV1FileData; /** * Optional. A predicted [FunctionCall] returned from the model that contains * a string representing the [FunctionDeclaration.name] with the parameters * and their values. */ functionCall?: GoogleCloudAiplatformV1FunctionCall; /** * Optional. The result output of a [FunctionCall] that contains a string * representing the [FunctionDeclaration.name] and a structured JSON object * containing any output from the function call. It is used as context to the * model. */ functionResponse?: GoogleCloudAiplatformV1FunctionResponse; /** * Optional. Inlined bytes data. */ inlineData?: GoogleCloudAiplatformV1Blob; /** * Optional. Text part (can be code). */ text?: string; /** * Optional. Video metadata. The metadata should only be specified while the * video data is presented in inline_data or file_data. */ videoMetadata?: GoogleCloudAiplatformV1VideoMetadata; } function serializeGoogleCloudAiplatformV1Part(data: any): GoogleCloudAiplatformV1Part { return { ...data, inlineData: data["inlineData"] !== undefined ? serializeGoogleCloudAiplatformV1Blob(data["inlineData"]) : undefined, videoMetadata: data["videoMetadata"] !== undefined ? serializeGoogleCloudAiplatformV1VideoMetadata(data["videoMetadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Part(data: any): GoogleCloudAiplatformV1Part { return { ...data, inlineData: data["inlineData"] !== undefined ? deserializeGoogleCloudAiplatformV1Blob(data["inlineData"]) : undefined, videoMetadata: data["videoMetadata"] !== undefined ? 
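    // For illustration, a text Part and an inline-image Part could be built as
    // below (the file name and MIME type are placeholders, and this assumes the
    // Blob type's `mimeType` and `data` fields); passing such a Part through
    // serializeGoogleCloudAiplatformV1Part base64-encodes inlineData.data via
    // serializeGoogleCloudAiplatformV1Blob:
    //
    //   const textPart: GoogleCloudAiplatformV1Part = { text: "Describe this chart." };
    //   const imagePart: GoogleCloudAiplatformV1Part = {
    //     inlineData: { mimeType: "image/png", data: await Deno.readFile("chart.png") },
    //   };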
deserializeGoogleCloudAiplatformV1VideoMetadata(data["videoMetadata"]) : undefined, }; } /** * Request message for JobService.PauseModelDeploymentMonitoringJob. */ export interface GoogleCloudAiplatformV1PauseModelDeploymentMonitoringJobRequest { } /** * Request message for ScheduleService.PauseSchedule. */ export interface GoogleCloudAiplatformV1PauseScheduleRequest { } /** * Represents the spec of persistent disk options. */ export interface GoogleCloudAiplatformV1PersistentDiskSpec { /** * Size in GB of the disk (default is 100GB). */ diskSizeGb?: bigint; /** * Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" * (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard * Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme * Persistent Disk) */ diskType?: string; } function serializeGoogleCloudAiplatformV1PersistentDiskSpec(data: any): GoogleCloudAiplatformV1PersistentDiskSpec { return { ...data, diskSizeGb: data["diskSizeGb"] !== undefined ? String(data["diskSizeGb"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PersistentDiskSpec(data: any): GoogleCloudAiplatformV1PersistentDiskSpec { return { ...data, diskSizeGb: data["diskSizeGb"] !== undefined ? BigInt(data["diskSizeGb"]) : undefined, }; } /** * Represents long-lasting resources that are dedicated to users to run custom * workloads. A PersistentResource can have multiple node pools and each node * pool can have its own machine spec. */ export interface GoogleCloudAiplatformV1PersistentResource { /** * Output only. Time when the PersistentResource was created. */ readonly createTime?: Date; /** * Optional. The display name of the PersistentResource. The name can be up * to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Customer-managed encryption key spec for a PersistentResource. * If set, this PersistentResource and all sub-resources of this * PersistentResource will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Only populated when persistent resource's state is `STOPPING` * or `ERROR`. */ readonly error?: GoogleRpcStatus; /** * Optional. The labels with user-defined metadata to organize * PersistentResource. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * Immutable. Resource name of a PersistentResource. */ name?: string; /** * Optional. The full name of the Compute Engine * [network](/compute/docs/networks-and-firewalls#networks) to be peered with * Vertex AI to host the persistent resources. For example, * `projects/12345/global/networks/myVPC`. * [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in `12345`, and {network} is a network name. To specify * this field, you must have already [configured VPC Network Peering for * Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If * this field is left unspecified, the resources aren't peered with any * network. */ network?: string; /** * Optional. A list of names for the reserved IP ranges under the VPC network * that can be used for this persistent resource.
If set, we will deploy the * persistent resource within the provided IP ranges. Otherwise, the * persistent resource is deployed to any IP ranges under the provided VPC * network. Example: ['vertex-ai-ip-range']. */ reservedIpRanges?: string[]; /** * Required. The spec of the pools of different resources. */ resourcePools?: GoogleCloudAiplatformV1ResourcePool[]; /** * Output only. Runtime information of the Persistent Resource. */ readonly resourceRuntime?: GoogleCloudAiplatformV1ResourceRuntime; /** * Optional. Persistent Resource runtime spec. For example, used for Ray * cluster configuration. */ resourceRuntimeSpec?: GoogleCloudAiplatformV1ResourceRuntimeSpec; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the PersistentResource for the first time entered * the `RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the PersistentResource. */ readonly state?: | "STATE_UNSPECIFIED" | "PROVISIONING" | "RUNNING" | "STOPPING" | "ERROR" | "REBOOTING" | "UPDATING"; /** * Output only. Time when the PersistentResource was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1PersistentResource(data: any): GoogleCloudAiplatformV1PersistentResource { return { ...data, resourcePools: data["resourcePools"] !== und