// Copyright 2022 Luca Casonato. All rights reserved. MIT license. /** * Google Cloud Memorystore for Redis API Client for Deno * ====================================================== * * Creates and manages Redis instances on the Google Cloud Platform. * * Docs: https://cloud.google.com/memorystore/docs/redis/ * Source: https://googleapis.deno.dev/v1/redis:v1.ts */ import { auth, CredentialsClient, GoogleAuth, request } from "/_/base@v1/mod.ts"; export { auth, GoogleAuth }; export type { CredentialsClient }; /** * Creates and manages Redis instances on the Google Cloud Platform. */ export class Redis { #client: CredentialsClient | undefined; #baseUrl: string; constructor(client?: CredentialsClient, baseUrl: string = "https://redis.googleapis.com/") { this.#client = client; this.#baseUrl = baseUrl; } /** * Deletes a specific backup. * * @param name Required. Redis backup resource name using the form: `projects/{project_id}/locations/{location_id}/backupCollections/{backup_collection_id}/backups/{backup_id}` */ async projectsLocationsBackupCollectionsBackupsDelete(name: string, opts: ProjectsLocationsBackupCollectionsBackupsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Exports a specific backup to a customer target Cloud Storage URI. * * @param name Required. Redis backup resource name using the form: `projects/{project_id}/locations/{location_id}/backupCollections/{backup_collection_id}/backups/{backup_id}` */ async projectsLocationsBackupCollectionsBackupsExport(name: string, req: ExportBackupRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:export`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Gets the details of a specific backup. * * @param name Required. Redis backup resource name using the form: `projects/{project_id}/locations/{location_id}/backupCollections/{backup_collection_id}/backups/{backup_id}` */ async projectsLocationsBackupCollectionsBackupsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as Backup; } /** * Lists all backups owned by a backup collection. * * @param parent Required. The resource name of the backupCollection using the form: `projects/{project_id}/locations/{location_id}/backupCollections/{backup_collection_id}` */ async projectsLocationsBackupCollectionsBackupsList(parent: string, opts: ProjectsLocationsBackupCollectionsBackupsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/backups`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListBackupsResponse; } /** * Get a backup collection. * * @param name Required. Redis backupCollection resource name using the form: `projects/{project_id}/locations/{location_id}/backupCollections/{backup_collection_id}` where `location_id` refers to a GCP region. 
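 *
 * Illustrative sketch (not part of the generated surface): fetching a backup
 * collection and paging through its backups. `credentialsClient` stands in for
 * a CredentialsClient obtained elsewhere, the project/location/collection ids
 * are placeholders, and the `backups`/`nextPageToken` fields on the list
 * response are assumed to follow the usual Google list-response shape.
 *
 * ```ts
 * const redis = new Redis(credentialsClient);
 * const parent =
 *   "projects/my-project/locations/us-central1/backupCollections/my-collection";
 * const collection = await redis.projectsLocationsBackupCollectionsGet(parent);
 * console.log("collection for cluster:", collection.cluster);
 * let pageToken: string | undefined;
 * do {
 *   const page = await redis.projectsLocationsBackupCollectionsBackupsList(
 *     parent,
 *     { pageSize: 100, pageToken },
 *   );
 *   for (const backup of page.backups ?? []) {
 *     console.log(backup.name, backup.totalSizeBytes);
 *   }
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```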
*/ async projectsLocationsBackupCollectionsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as BackupCollection; } /** * Lists all backup collections owned by a consumer project in either the * specified location (region) or all locations. If `location_id` is specified * as `-` (wildcard), then all regions available to the project are queried, * and the results are aggregated. * * @param parent Required. The resource name of the backupCollection location using the form: `projects/{project_id}/locations/{location_id}` where `location_id` refers to a GCP region. */ async projectsLocationsBackupCollectionsList(parent: string, opts: ProjectsLocationsBackupCollectionsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/backupCollections`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListBackupCollectionsResponse; } /** * Backup Redis Cluster. If this is the first time a backup is being created, * a backup collection will be created at the backend, and this backup belongs * to this collection. Both collection and backup will have a resource name. * Backup will be executed for each shard. A replica (primary if nonHA) will * be selected to perform the execution. Backup call will be rejected if there * is an ongoing backup or update operation. * * @param name Required. Redis cluster resource name using the form: `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` where `location_id` refers to a GCP region. */ async projectsLocationsClustersBackup(name: string, req: BackupClusterRequest): Promise { req = serializeBackupClusterRequest(req); const url = new URL(`${this.#baseUrl}v1/${ name }:backup`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Creates a Redis cluster based on the specified properties. The creation is * executed asynchronously and callers may check the returned operation to * track its progress. Once the operation is completed the Redis cluster will * be fully functional. The completed longrunning.Operation will contain the * new cluster object in the response field. The returned operation is * automatically deleted after a few hours, so there is no need to call * DeleteOperation. * * @param parent Required. The resource name of the cluster location using the form: `projects/{project_id}/locations/{location_id}` where `location_id` refers to a GCP region. */ async projectsLocationsClustersCreate(parent: string, req: Cluster, opts: ProjectsLocationsClustersCreateOptions = {}): Promise { req = serializeCluster(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/clusters`); if (opts.clusterId !== undefined) { url.searchParams.append("clusterId", String(opts.clusterId)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Deletes a specific Redis cluster. Cluster stops serving and data is * deleted. * * @param name Required. 
Redis cluster resource name using the form: `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` where `location_id` refers to a GCP region. */ async projectsLocationsClustersDelete(name: string, opts: ProjectsLocationsClustersDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Gets the details of a specific Redis cluster. * * @param name Required. Redis cluster resource name using the form: `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` where `location_id` refers to a GCP region. */ async projectsLocationsClustersGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeCluster(data); } /** * Gets the details of certificate authority information for Redis cluster. * * @param name Required. Redis cluster certificate authority resource name using the form: `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}/certificateAuthority` where `location_id` refers to a GCP region. */ async projectsLocationsClustersGetCertificateAuthority(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as CertificateAuthority; } /** * Lists all Redis clusters owned by a project in either the specified * location (region) or all locations. The location should have the following * format: * `projects/{project_id}/locations/{location_id}` If `location_id` * is specified as `-` (wildcard), then all regions available to the project * are queried, and the results are aggregated. * * @param parent Required. The resource name of the cluster location using the form: `projects/{project_id}/locations/{location_id}` where `location_id` refers to a GCP region. */ async projectsLocationsClustersList(parent: string, opts: ProjectsLocationsClustersListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/clusters`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeListClustersResponse(data); } /** * Updates the metadata and configuration of a specific Redis cluster. * Completed longrunning.Operation will contain the new cluster object in the * response field. The returned operation is automatically deleted after a few * hours, so there is no need to call DeleteOperation. * * @param name Required. Identifier. 
Unique name of the resource in this scope including project and location using the form: `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` */ async projectsLocationsClustersPatch(name: string, req: Cluster, opts: ProjectsLocationsClustersPatchOptions = {}): Promise { req = serializeCluster(req); opts = serializeProjectsLocationsClustersPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as Operation; } /** * Reschedules upcoming maintenance event. * * @param name Required. Redis Cluster instance resource name using the form: `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` where `location_id` refers to a GCP region. */ async projectsLocationsClustersRescheduleClusterMaintenance(name: string, req: RescheduleClusterMaintenanceRequest): Promise { req = serializeRescheduleClusterMaintenanceRequest(req); const url = new URL(`${this.#baseUrl}v1/${ name }:rescheduleClusterMaintenance`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Gets information about a location. * * @param name Resource name for the location. */ async projectsLocationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as Location; } /** * Creates a Redis instance based on the specified tier and memory size. By * default, the instance is accessible from the project's [default * network](https://cloud.google.com/vpc/docs/vpc). The creation is executed * asynchronously and callers may check the returned operation to track its * progress. Once the operation is completed the Redis instance will be fully * functional. Completed longrunning.Operation will contain the new instance * object in the response field. The returned operation is automatically * deleted after a few hours, so there is no need to call DeleteOperation. * * @param parent Required. The resource name of the instance location using the form: `projects/{project_id}/locations/{location_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesCreate(parent: string, req: Instance, opts: ProjectsLocationsInstancesCreateOptions = {}): Promise { req = serializeInstance(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/instances`); if (opts.instanceId !== undefined) { url.searchParams.append("instanceId", String(opts.instanceId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Deletes a specific Redis instance. Instance stops serving and data is * deleted. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. 
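 *
 * Illustrative sketch (not part of the generated surface): deleting an
 * instance and polling the returned long-running operation until it completes.
 * The instance name and `credentialsClient` are placeholders, and the
 * `name`/`done`/`error` fields on Operation are assumed to follow the standard
 * google.longrunning.Operation shape.
 *
 * ```ts
 * const redis = new Redis(credentialsClient);
 * let op = await redis.projectsLocationsInstancesDelete(
 *   "projects/my-project/locations/us-central1/instances/my-instance",
 * );
 * while (!op.done) {
 *   await new Promise((resolve) => setTimeout(resolve, 5_000));
 *   op = await redis.projectsLocationsOperationsGet(op.name!);
 * }
 * if (op.error) throw new Error(op.error.message);
 * ```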
*/ async projectsLocationsInstancesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Export Redis instance data into a Redis RDB format file in Cloud Storage. * Redis will continue serving during this operation. The returned operation * is automatically deleted after a few hours, so there is no need to call * DeleteOperation. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesExport(name: string, req: ExportInstanceRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:export`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Initiates a failover of the primary node to current replica node for a * specific STANDARD tier Cloud Memorystore for Redis instance. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesFailover(name: string, req: FailoverInstanceRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:failover`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Gets the details of a specific Redis instance. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeInstance(data); } /** * Gets the AUTH string for a Redis instance. If AUTH is not enabled for the * instance the response will be empty. This information is not included in * the details returned to GetInstance. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesGetAuthString(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/authString`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as InstanceAuthString; } /** * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. * Redis may stop serving during this operation. Instance state will be * IMPORTING for entire operation. When complete, the instance will contain * only data from the imported file. The returned operation is automatically * deleted after a few hours, so there is no need to call DeleteOperation. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. 
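 *
 * Illustrative sketch (not part of the generated surface): importing an RDB
 * snapshot from Cloud Storage. The resource name, bucket URI, and
 * `credentialsClient` are placeholders, and the `inputConfig.gcsSource.uri`
 * request shape is assumed here rather than taken from this excerpt.
 *
 * ```ts
 * const redis = new Redis(credentialsClient);
 * const op = await redis.projectsLocationsInstancesImport(
 *   "projects/my-project/locations/us-central1/instances/my-instance",
 *   { inputConfig: { gcsSource: { uri: "gs://my-bucket/backup.rdb" } } },
 * );
 * console.log("import operation:", op.name);
 * ```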
*/ async projectsLocationsInstancesImport(name: string, req: ImportInstanceRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:import`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Lists all Redis instances owned by a project in either the specified * location (region) or all locations. The location should have the following * format: * `projects/{project_id}/locations/{location_id}` If `location_id` * is specified as `-` (wildcard), then all regions available to the project * are queried, and the results are aggregated. * * @param parent Required. The resource name of the instance location using the form: `projects/{project_id}/locations/{location_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesList(parent: string, opts: ProjectsLocationsInstancesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/instances`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeListInstancesResponse(data); } /** * Updates the metadata and configuration of a specific Redis instance. * Completed longrunning.Operation will contain the new instance object in the * response field. The returned operation is automatically deleted after a few * hours, so there is no need to call DeleteOperation. * * @param name Required. Unique name of the resource in this scope including project and location using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` Note: Redis instances are managed and addressed at regional level so location_id here refers to a GCP region; however, users may choose which specific zone (or collection of zones for cross-zone instances) an instance should be provisioned in. Refer to location_id and alternative_location_id fields for more details. */ async projectsLocationsInstancesPatch(name: string, req: Instance, opts: ProjectsLocationsInstancesPatchOptions = {}): Promise { req = serializeInstance(req); opts = serializeProjectsLocationsInstancesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as Operation; } /** * Reschedule maintenance for a given instance in a given project and * location. * * @param name Required. Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesRescheduleMaintenance(name: string, req: RescheduleMaintenanceRequest): Promise { req = serializeRescheduleMaintenanceRequest(req); const url = new URL(`${this.#baseUrl}v1/${ name }:rescheduleMaintenance`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Upgrades Redis instance to the newer Redis version specified in the * request. * * @param name Required. 
Redis instance resource name using the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}` where `location_id` refers to a GCP region. */ async projectsLocationsInstancesUpgrade(name: string, req: UpgradeInstanceRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:upgrade`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Lists information about the supported locations for this service. * * @param name The resource that owns the locations collection, if applicable. */ async projectsLocationsList(name: string, opts: ProjectsLocationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/locations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListLocationsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as Empty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Empty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as Operation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
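 *
 * Illustrative sketch (not part of the generated surface): listing the
 * operations under a location. The parent name and `credentialsClient` are
 * placeholders, and the `operations`/`nextPageToken` fields are assumed to
 * follow the usual Google list-response shape.
 *
 * ```ts
 * const redis = new Redis(credentialsClient);
 * let pageToken: string | undefined;
 * do {
 *   const page = await redis.projectsLocationsOperationsList(
 *     "projects/my-project/locations/us-central1",
 *     { pageSize: 50, pageToken },
 *   );
 *   for (const op of page.operations ?? []) {
 *     console.log(op.name, op.done ? "done" : "running");
 *   }
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```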
*/ async projectsLocationsOperationsList(name: string, opts: ProjectsLocationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListOperationsResponse; } } /** * Configuration of the AOF based persistence. */ export interface AOFConfig { /** * Optional. fsync configuration. */ appendFsync?: | "APPEND_FSYNC_UNSPECIFIED" | "NO" | "EVERYSEC" | "ALWAYS"; } /** * The automated backup config for a cluster. */ export interface AutomatedBackupConfig { /** * Optional. The automated backup mode. If the mode is disabled, the other * fields will be ignored. */ automatedBackupMode?: | "AUTOMATED_BACKUP_MODE_UNSPECIFIED" | "DISABLED" | "ENABLED"; /** * Optional. Trigger automated backups at a fixed frequency. */ fixedFrequencySchedule?: FixedFrequencySchedule; /** * Optional. How long to keep automated backups before the backups are * deleted. If not specified, the default value is 100 years which is also the * maximum value supported. The minimum value is 1 day. */ retention?: number /* Duration */; } function serializeAutomatedBackupConfig(data: any): AutomatedBackupConfig { return { ...data, retention: data["retention"] !== undefined ? data["retention"] : undefined, }; } function deserializeAutomatedBackupConfig(data: any): AutomatedBackupConfig { return { ...data, retention: data["retention"] !== undefined ? data["retention"] : undefined, }; } /** * Configuration for availability of database instance */ export interface AvailabilityConfiguration { /** * Checks for existence of (multi-cluster) routing configuration that allows * automatic failover to a different zone/region in case of an outage. * Applicable to Bigtable resources. */ automaticFailoverRoutingConfigured?: boolean; /** * Availability type. Potential values: * `ZONAL`: The instance serves data * from only one zone. Outages in that zone affect data accessibility. * * `REGIONAL`: The instance can serve data from more than one zone in a region * (it is highly available). */ availabilityType?: | "AVAILABILITY_TYPE_UNSPECIFIED" | "ZONAL" | "REGIONAL" | "MULTI_REGIONAL" | "AVAILABILITY_TYPE_OTHER"; /** * Checks for resources that are configured to have redundancy, and ongoing * replication across regions */ crossRegionReplicaConfigured?: boolean; externalReplicaConfigured?: boolean; promotableReplicaConfigured?: boolean; } /** * Backup of a cluster. */ export interface Backup { /** * Output only. List of backup files of the backup. */ readonly backupFiles?: BackupFile[]; /** * Output only. Type of the backup. */ readonly backupType?: | "BACKUP_TYPE_UNSPECIFIED" | "ON_DEMAND" | "AUTOMATED"; /** * Output only. Cluster resource path of this backup. */ readonly cluster?: string; /** * Output only. Cluster uid of this backup. */ readonly clusterUid?: string; /** * Output only. The time when the backup was created. */ readonly createTime?: Date; /** * Output only. redis-7.2, valkey-7.5 */ readonly engineVersion?: string; /** * Output only. The time when the backup will expire. */ readonly expireTime?: Date; /** * Identifier. Full resource path of the backup. 
the last part of the name is * the backup id with the following format: [YYYYMMDDHHMMSS]_[Shorted Cluster * UID] OR customer specified while backup cluster. Example: * 20240515123000_1234 */ name?: string; /** * Output only. Node type of the cluster. */ readonly nodeType?: | "NODE_TYPE_UNSPECIFIED" | "REDIS_SHARED_CORE_NANO" | "REDIS_HIGHMEM_MEDIUM" | "REDIS_HIGHMEM_XLARGE" | "REDIS_STANDARD_SMALL"; /** * Output only. Number of replicas for the cluster. */ readonly replicaCount?: number; /** * Output only. Number of shards for the cluster. */ readonly shardCount?: number; /** * Output only. State of the backup. */ readonly state?: | "STATE_UNSPECIFIED" | "CREATING" | "ACTIVE" | "DELETING"; /** * Output only. Total size of the backup in bytes. */ readonly totalSizeBytes?: bigint; } /** * Request for [BackupCluster]. */ export interface BackupClusterRequest { /** * Optional. The id of the backup to be created. If not specified, the * default value ([YYYYMMDDHHMMSS]_[Shortened Cluster UID] is used. */ backupId?: string; /** * Optional. TTL for the backup to expire. Value range is 1 day to 100 years. * If not specified, the default value is 100 years. */ ttl?: number /* Duration */; } function serializeBackupClusterRequest(data: any): BackupClusterRequest { return { ...data, ttl: data["ttl"] !== undefined ? data["ttl"] : undefined, }; } function deserializeBackupClusterRequest(data: any): BackupClusterRequest { return { ...data, ttl: data["ttl"] !== undefined ? data["ttl"] : undefined, }; } /** * BackupCollection of a cluster. */ export interface BackupCollection { /** * Output only. The full resource path of the cluster the backup collection * belongs to. Example: * projects/{project}/locations/{location}/clusters/{cluster} */ readonly cluster?: string; /** * Output only. The cluster uid of the backup collection. */ readonly clusterUid?: string; /** * Identifier. Full resource path of the backup collection. */ name?: string; } /** * Configuration for automatic backups */ export interface BackupConfiguration { /** * Whether customer visible automated backups are enabled on the instance. */ automatedBackupEnabled?: boolean; /** * Backup retention settings. */ backupRetentionSettings?: RetentionSettings; /** * Whether point-in-time recovery is enabled. This is optional field, if the * database service does not have this feature or metadata is not available in * control plane, this can be omitted. */ pointInTimeRecoveryEnabled?: boolean; } function serializeBackupConfiguration(data: any): BackupConfiguration { return { ...data, backupRetentionSettings: data["backupRetentionSettings"] !== undefined ? serializeRetentionSettings(data["backupRetentionSettings"]) : undefined, }; } function deserializeBackupConfiguration(data: any): BackupConfiguration { return { ...data, backupRetentionSettings: data["backupRetentionSettings"] !== undefined ? deserializeRetentionSettings(data["backupRetentionSettings"]) : undefined, }; } /** * Backup is consisted of multiple backup files. */ export interface BackupFile { /** * Output only. The time when the backup file was created. */ readonly createTime?: Date; /** * Output only. e.g: .rdb */ readonly fileName?: string; /** * Output only. Size of the backup file in bytes. */ readonly sizeBytes?: bigint; } /** * A backup run. */ export interface BackupRun { /** * The time the backup operation completed. REQUIRED */ endTime?: Date; /** * Information about why the backup operation failed. This is only present if * the run has the FAILED status. 
OPTIONAL */ error?: OperationError; /** * The time the backup operation started. REQUIRED */ startTime?: Date; /** * The status of this run. REQUIRED */ status?: | "STATUS_UNSPECIFIED" | "SUCCESSFUL" | "FAILED"; } function serializeBackupRun(data: any): BackupRun { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeBackupRun(data: any): BackupRun { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } export interface CertChain { /** * The certificates that form the CA chain, from leaf to root order. */ certificates?: string[]; } /** * Redis cluster certificate authority */ export interface CertificateAuthority { managedServerCa?: ManagedCertificateAuthority; /** * Identifier. Unique name of the resource in this scope including project, * location and cluster using the form: * `projects/{project}/locations/{location}/clusters/{cluster}/certificateAuthority` */ name?: string; } /** * A cluster instance. */ export interface Cluster { /** * Optional. The authorization mode of the Redis cluster. If not provided, * auth feature is disabled for the cluster. */ authorizationMode?: | "AUTH_MODE_UNSPECIFIED" | "AUTH_MODE_IAM_AUTH" | "AUTH_MODE_DISABLED"; /** * Optional. The automated backup config for the cluster. */ automatedBackupConfig?: AutomatedBackupConfig; /** * Optional. Output only. The backup collection full resource name. Example: * projects/{project}/locations/{location}/backupCollections/{collection} */ readonly backupCollection?: string; /** * Optional. A list of cluster enpoints. */ clusterEndpoints?: ClusterEndpoint[]; /** * Output only. The timestamp associated with the cluster creation request. */ readonly createTime?: Date; /** * Optional. Cross cluster replication config. */ crossClusterReplicationConfig?: CrossClusterReplicationConfig; /** * Optional. The delete operation will fail when the value is set to true. */ deletionProtectionEnabled?: boolean; /** * Output only. Endpoints created on each given network, for Redis clients to * connect to the cluster. Currently only one discovery endpoint is supported. */ readonly discoveryEndpoints?: DiscoveryEndpoint[]; /** * Optional. Backups stored in Cloud Storage buckets. The Cloud Storage * buckets need to be the same region as the clusters. Read permission is * required to import from the provided Cloud Storage objects. */ gcsSource?: GcsBackupSource; /** * Optional. ClusterMaintenancePolicy determines when to allow or deny * updates. */ maintenancePolicy?: ClusterMaintenancePolicy; /** * Output only. ClusterMaintenanceSchedule Output only Published maintenance * schedule. */ readonly maintenanceSchedule?: ClusterMaintenanceSchedule; /** * Optional. Backups generated and managed by memorystore service. */ managedBackupSource?: ManagedBackupSource; /** * Required. Identifier. Unique name of the resource in this scope including * project and location using the form: * `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` */ name?: string; /** * Optional. The type of a redis node in the cluster. NodeType determines the * underlying machine-type of a redis node. */ nodeType?: | "NODE_TYPE_UNSPECIFIED" | "REDIS_SHARED_CORE_NANO" | "REDIS_HIGHMEM_MEDIUM" | "REDIS_HIGHMEM_XLARGE" | "REDIS_STANDARD_SMALL"; /** * Optional. 
Persistence config (RDB, AOF) for the cluster. */ persistenceConfig?: ClusterPersistenceConfig; /** * Output only. Precise value of redis memory size in GB for the entire * cluster. */ readonly preciseSizeGb?: number; /** * Optional. Each PscConfig configures the consumer network where IPs will be * designated to the cluster for client access through Private Service Connect * Automation. Currently, only one PscConfig is supported. */ pscConfigs?: PscConfig[]; /** * Output only. The list of PSC connections that are auto-created through * service connectivity automation. */ readonly pscConnections?: PscConnection[]; /** * Output only. Service attachment details to configure Psc connections */ readonly pscServiceAttachments?: PscServiceAttachment[]; /** * Optional. Key/Value pairs of customer overrides for mutable Redis Configs */ redisConfigs?: { [key: string]: string }; /** * Optional. The number of replica nodes per shard. */ replicaCount?: number; /** * Optional. Number of shards for the Redis cluster. */ shardCount?: number; /** * Output only. Redis memory size in GB for the entire cluster rounded up to * the next integer. */ readonly sizeGb?: number; /** * Output only. The current state of this cluster. Can be CREATING, READY, * UPDATING, DELETING and SUSPENDED */ readonly state?: | "STATE_UNSPECIFIED" | "CREATING" | "ACTIVE" | "UPDATING" | "DELETING"; /** * Output only. Additional information about the current state of the * cluster. */ readonly stateInfo?: StateInfo; /** * Optional. The in-transit encryption for the Redis cluster. If not * provided, encryption is disabled for the cluster. */ transitEncryptionMode?: | "TRANSIT_ENCRYPTION_MODE_UNSPECIFIED" | "TRANSIT_ENCRYPTION_MODE_DISABLED" | "TRANSIT_ENCRYPTION_MODE_SERVER_AUTHENTICATION"; /** * Output only. System assigned, unique identifier for the cluster. */ readonly uid?: string; /** * Optional. This config will be used to determine how the customer wants us * to distribute cluster resources within the region. */ zoneDistributionConfig?: ZoneDistributionConfig; } function serializeCluster(data: any): Cluster { return { ...data, automatedBackupConfig: data["automatedBackupConfig"] !== undefined ? serializeAutomatedBackupConfig(data["automatedBackupConfig"]) : undefined, persistenceConfig: data["persistenceConfig"] !== undefined ? serializeClusterPersistenceConfig(data["persistenceConfig"]) : undefined, }; } function deserializeCluster(data: any): Cluster { return { ...data, automatedBackupConfig: data["automatedBackupConfig"] !== undefined ? deserializeAutomatedBackupConfig(data["automatedBackupConfig"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, persistenceConfig: data["persistenceConfig"] !== undefined ? deserializeClusterPersistenceConfig(data["persistenceConfig"]) : undefined, }; } /** * ClusterEndpoint consists of PSC connections that are created as a group in * each VPC network for accessing the cluster. In each group, there shall be one * connection for each service attachment in the cluster. */ export interface ClusterEndpoint { /** * A group of PSC connections. They are created in the same VPC network, one * for each service attachment in the cluster. */ connections?: ConnectionDetail[]; } /** * Maintenance policy per cluster. */ export interface ClusterMaintenancePolicy { /** * Output only. The time when the policy was created i.e. Maintenance Window * or Deny Period was assigned. */ readonly createTime?: Date; /** * Output only. 
The time when the policy was updated i.e. Maintenance Window * or Deny Period was updated. */ readonly updateTime?: Date; /** * Optional. Maintenance window that is applied to resources covered by this * policy. Minimum 1. For the current version, the maximum number of * weekly_maintenance_window is expected to be one. */ weeklyMaintenanceWindow?: ClusterWeeklyMaintenanceWindow[]; } /** * Upcoming maitenance schedule. */ export interface ClusterMaintenanceSchedule { /** * Output only. The end time of any upcoming scheduled maintenance for this * instance. */ readonly endTime?: Date; /** * Output only. The start time of any upcoming scheduled maintenance for this * instance. */ readonly startTime?: Date; } /** * Configuration of the persistence functionality. */ export interface ClusterPersistenceConfig { /** * Optional. AOF configuration. This field will be ignored if mode is not * AOF. */ aofConfig?: AOFConfig; /** * Optional. The mode of persistence. */ mode?: | "PERSISTENCE_MODE_UNSPECIFIED" | "DISABLED" | "RDB" | "AOF"; /** * Optional. RDB configuration. This field will be ignored if mode is not * RDB. */ rdbConfig?: RDBConfig; } function serializeClusterPersistenceConfig(data: any): ClusterPersistenceConfig { return { ...data, rdbConfig: data["rdbConfig"] !== undefined ? serializeRDBConfig(data["rdbConfig"]) : undefined, }; } function deserializeClusterPersistenceConfig(data: any): ClusterPersistenceConfig { return { ...data, rdbConfig: data["rdbConfig"] !== undefined ? deserializeRDBConfig(data["rdbConfig"]) : undefined, }; } /** * Time window specified for weekly operations. */ export interface ClusterWeeklyMaintenanceWindow { /** * Allows to define schedule that runs specified day of the week. */ day?: | "DAY_OF_WEEK_UNSPECIFIED" | "MONDAY" | "TUESDAY" | "WEDNESDAY" | "THURSDAY" | "FRIDAY" | "SATURDAY" | "SUNDAY"; /** * Start time of the window in UTC. */ startTime?: TimeOfDay; } /** * Contains compliance information about a security standard indicating unmet * recommendations. */ export interface Compliance { /** * Industry-wide compliance standards or benchmarks, such as CIS, PCI, and * OWASP. */ standard?: string; /** * Version of the standard or benchmark, for example, 1.1 */ version?: string; } /** * Detailed information of each PSC connection. */ export interface ConnectionDetail { /** * Detailed information of a PSC connection that is created by the customer * who owns the cluster. */ pscConnection?: PscConnection; } /** * Cross cluster replication config. */ export interface CrossClusterReplicationConfig { /** * The role of the cluster in cross cluster replication. */ clusterRole?: | "CLUSTER_ROLE_UNSPECIFIED" | "NONE" | "PRIMARY" | "SECONDARY"; /** * Output only. An output only view of all the member clusters participating * in the cross cluster replication. This view will be provided by every * member cluster irrespective of its cluster role(primary or secondary). A * primary cluster can provide information about all the secondary clusters * replicating from it. However, a secondary cluster only knows about the * primary cluster from which it is replicating. However, for scenarios, where * the primary cluster is unavailable(e.g. regional outage), a GetCluster * request can be sent to any other member cluster and this field will list * all the member clusters participating in cross cluster replication. */ readonly membership?: Membership; /** * Details of the primary cluster that is used as the replication source for * this secondary cluster. 
This field is only set for a secondary cluster. */ primaryCluster?: RemoteCluster; /** * List of secondary clusters that are replicating from this primary cluster. * This field is only set for a primary cluster. */ secondaryClusters?: RemoteCluster[]; /** * Output only. The last time cross cluster replication config was updated. */ readonly updateTime?: Date; } /** * Any custom metadata associated with the resource. e.g. A spanner instance * can have multiple databases with its own unique metadata. Information for * these individual databases can be captured in custom metadata data */ export interface CustomMetadataData { /** * Metadata for individual internal resources in an instance. e.g. spanner * instance can have multiple databases with unique configuration. */ internalResourceMetadata?: InternalResourceMetadata[]; } function serializeCustomMetadataData(data: any): CustomMetadataData { return { ...data, internalResourceMetadata: data["internalResourceMetadata"] !== undefined ? data["internalResourceMetadata"].map((item: any) => (serializeInternalResourceMetadata(item))) : undefined, }; } function deserializeCustomMetadataData(data: any): CustomMetadataData { return { ...data, internalResourceMetadata: data["internalResourceMetadata"] !== undefined ? data["internalResourceMetadata"].map((item: any) => (deserializeInternalResourceMetadata(item))) : undefined, }; } /** * DatabaseResourceFeed is the top level proto to be used to ingest different * database resource level events into Condor platform. */ export interface DatabaseResourceFeed { /** * Required. Timestamp when feed is generated. */ feedTimestamp?: Date; /** * Required. Type feed to be ingested into condor */ feedType?: | "FEEDTYPE_UNSPECIFIED" | "RESOURCE_METADATA" | "OBSERVABILITY_DATA" | "SECURITY_FINDING_DATA" | "RECOMMENDATION_SIGNAL_DATA"; observabilityMetricData?: ObservabilityMetricData; recommendationSignalData?: DatabaseResourceRecommendationSignalData; resourceHealthSignalData?: DatabaseResourceHealthSignalData; /** * Primary key associated with the Resource. resource_id is available in * individual feed level as well. */ resourceId?: DatabaseResourceId; resourceMetadata?: DatabaseResourceMetadata; } function serializeDatabaseResourceFeed(data: any): DatabaseResourceFeed { return { ...data, feedTimestamp: data["feedTimestamp"] !== undefined ? data["feedTimestamp"].toISOString() : undefined, observabilityMetricData: data["observabilityMetricData"] !== undefined ? serializeObservabilityMetricData(data["observabilityMetricData"]) : undefined, recommendationSignalData: data["recommendationSignalData"] !== undefined ? serializeDatabaseResourceRecommendationSignalData(data["recommendationSignalData"]) : undefined, resourceHealthSignalData: data["resourceHealthSignalData"] !== undefined ? serializeDatabaseResourceHealthSignalData(data["resourceHealthSignalData"]) : undefined, resourceMetadata: data["resourceMetadata"] !== undefined ? serializeDatabaseResourceMetadata(data["resourceMetadata"]) : undefined, }; } function deserializeDatabaseResourceFeed(data: any): DatabaseResourceFeed { return { ...data, feedTimestamp: data["feedTimestamp"] !== undefined ? new Date(data["feedTimestamp"]) : undefined, observabilityMetricData: data["observabilityMetricData"] !== undefined ? deserializeObservabilityMetricData(data["observabilityMetricData"]) : undefined, recommendationSignalData: data["recommendationSignalData"] !== undefined ? 
deserializeDatabaseResourceRecommendationSignalData(data["recommendationSignalData"]) : undefined, resourceHealthSignalData: data["resourceHealthSignalData"] !== undefined ? deserializeDatabaseResourceHealthSignalData(data["resourceHealthSignalData"]) : undefined, resourceMetadata: data["resourceMetadata"] !== undefined ? deserializeDatabaseResourceMetadata(data["resourceMetadata"]) : undefined, }; } /** * Common model for database resource health signal data. */ export interface DatabaseResourceHealthSignalData { /** * Any other additional metadata */ additionalMetadata?: { [key: string]: any }; /** * Industry standards associated with this signal; if this signal is an * issue, that could be a violation of the associated industry standard(s). * For example, AUTO_BACKUP_DISABLED signal is associated with CIS GCP 1.1, * CIS GCP 1.2, CIS GCP 1.3, NIST 800-53 and ISO-27001 compliance standards. * If a database resource does not have automated backup enable, it will * violate these following industry standards. */ compliance?: Compliance[]; /** * Description associated with signal */ description?: string; /** * Required. The last time at which the event described by this signal took * place */ eventTime?: Date; /** * The external-uri of the signal, using which more information about this * signal can be obtained. In GCP, this will take user to SCC page to get more * details about signals. */ externalUri?: string; /** * Required. The name of the signal, ex: PUBLIC_SQL_INSTANCE, * SQL_LOG_ERROR_VERBOSITY etc. */ name?: string; /** * Cloud provider name. Ex: GCP/AWS/Azure/OnPrem/SelfManaged */ provider?: | "PROVIDER_UNSPECIFIED" | "GCP" | "AWS" | "AZURE" | "ONPREM" | "SELFMANAGED" | "PROVIDER_OTHER"; /** * Closest parent container of this resource. In GCP, 'container' refers to a * Cloud Resource Manager project. It must be resource name of a Cloud * Resource Manager project with the format of "provider//", such as * "projects/123". For GCP provided resources, number should be project * number. */ resourceContainer?: string; /** * Required. Database resource name associated with the signal. Resource name * to follow CAIS resource_name format as noted here * go/condor-common-datamodel */ resourceName?: string; /** * Required. The class of the signal, such as if it's a THREAT or * VULNERABILITY. */ signalClass?: | "CLASS_UNSPECIFIED" | "THREAT" | "VULNERABILITY" | "MISCONFIGURATION" | "OBSERVATION" | "ERROR"; /** * Required. Unique identifier for the signal. This is an unique id which * would be mainatined by partner to identify a signal. */ signalId?: string; /** * The severity of the signal, such as if it's a HIGH or LOW severity. */ signalSeverity?: | "SIGNAL_SEVERITY_UNSPECIFIED" | "CRITICAL" | "HIGH" | "MEDIUM" | "LOW"; /** * Required. Type of signal, for example, `AVAILABLE_IN_MULTIPLE_ZONES`, * `LOGGING_MOST_ERRORS`, etc. 
*/ signalType?: | "SIGNAL_TYPE_UNSPECIFIED" | "SIGNAL_TYPE_NOT_PROTECTED_BY_AUTOMATIC_FAILOVER" | "SIGNAL_TYPE_GROUP_NOT_REPLICATING_ACROSS_REGIONS" | "SIGNAL_TYPE_NOT_AVAILABLE_IN_MULTIPLE_ZONES" | "SIGNAL_TYPE_NOT_AVAILABLE_IN_MULTIPLE_REGIONS" | "SIGNAL_TYPE_NO_PROMOTABLE_REPLICA" | "SIGNAL_TYPE_NO_AUTOMATED_BACKUP_POLICY" | "SIGNAL_TYPE_SHORT_BACKUP_RETENTION" | "SIGNAL_TYPE_LAST_BACKUP_FAILED" | "SIGNAL_TYPE_LAST_BACKUP_OLD" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_2_0" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_3" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_2" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_1" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_0" | "SIGNAL_TYPE_VIOLATES_CIS_CONTROLS_V8_0" | "SIGNAL_TYPE_VIOLATES_NIST_800_53" | "SIGNAL_TYPE_VIOLATES_NIST_800_53_R5" | "SIGNAL_TYPE_VIOLATES_NIST_CYBERSECURITY_FRAMEWORK_V1_0" | "SIGNAL_TYPE_VIOLATES_ISO_27001" | "SIGNAL_TYPE_VIOLATES_ISO_27001_V2022" | "SIGNAL_TYPE_VIOLATES_PCI_DSS_V3_2_1" | "SIGNAL_TYPE_VIOLATES_PCI_DSS_V4_0" | "SIGNAL_TYPE_VIOLATES_CLOUD_CONTROLS_MATRIX_V4" | "SIGNAL_TYPE_VIOLATES_HIPAA" | "SIGNAL_TYPE_VIOLATES_SOC2_V2017" | "SIGNAL_TYPE_LOGS_NOT_OPTIMIZED_FOR_TROUBLESHOOTING" | "SIGNAL_TYPE_QUERY_DURATIONS_NOT_LOGGED" | "SIGNAL_TYPE_VERBOSE_ERROR_LOGGING" | "SIGNAL_TYPE_QUERY_LOCK_WAITS_NOT_LOGGED" | "SIGNAL_TYPE_LOGGING_MOST_ERRORS" | "SIGNAL_TYPE_LOGGING_ONLY_CRITICAL_ERRORS" | "SIGNAL_TYPE_MINIMAL_ERROR_LOGGING" | "SIGNAL_TYPE_QUERY_STATISTICS_LOGGED" | "SIGNAL_TYPE_EXCESSIVE_LOGGING_OF_CLIENT_HOSTNAME" | "SIGNAL_TYPE_EXCESSIVE_LOGGING_OF_PARSER_STATISTICS" | "SIGNAL_TYPE_EXCESSIVE_LOGGING_OF_PLANNER_STATISTICS" | "SIGNAL_TYPE_NOT_LOGGING_ONLY_DDL_STATEMENTS" | "SIGNAL_TYPE_LOGGING_QUERY_STATISTICS" | "SIGNAL_TYPE_NOT_LOGGING_TEMPORARY_FILES" | "SIGNAL_TYPE_CONNECTION_MAX_NOT_CONFIGURED" | "SIGNAL_TYPE_USER_OPTIONS_CONFIGURED" | "SIGNAL_TYPE_EXPOSED_TO_PUBLIC_ACCESS" | "SIGNAL_TYPE_UNENCRYPTED_CONNECTIONS" | "SIGNAL_TYPE_NO_ROOT_PASSWORD" | "SIGNAL_TYPE_WEAK_ROOT_PASSWORD" | "SIGNAL_TYPE_ENCRYPTION_KEY_NOT_CUSTOMER_MANAGED" | "SIGNAL_TYPE_SERVER_AUTHENTICATION_NOT_REQUIRED" | "SIGNAL_TYPE_EXPOSED_BY_OWNERSHIP_CHAINING" | "SIGNAL_TYPE_EXPOSED_TO_EXTERNAL_SCRIPTS" | "SIGNAL_TYPE_EXPOSED_TO_LOCAL_DATA_LOADS" | "SIGNAL_TYPE_CONNECTION_ATTEMPTS_NOT_LOGGED" | "SIGNAL_TYPE_DISCONNECTIONS_NOT_LOGGED" | "SIGNAL_TYPE_LOGGING_EXCESSIVE_STATEMENT_INFO" | "SIGNAL_TYPE_EXPOSED_TO_REMOTE_ACCESS" | "SIGNAL_TYPE_DATABASE_NAMES_EXPOSED" | "SIGNAL_TYPE_SENSITIVE_TRACE_INFO_NOT_MASKED" | "SIGNAL_TYPE_PUBLIC_IP_ENABLED" | "SIGNAL_TYPE_IDLE" | "SIGNAL_TYPE_OVERPROVISIONED" | "SIGNAL_TYPE_HIGH_NUMBER_OF_OPEN_TABLES" | "SIGNAL_TYPE_HIGH_NUMBER_OF_TABLES" | "SIGNAL_TYPE_HIGH_TRANSACTION_ID_UTILIZATION" | "SIGNAL_TYPE_UNDERPROVISIONED" | "SIGNAL_TYPE_OUT_OF_DISK" | "SIGNAL_TYPE_SERVER_CERTIFICATE_NEAR_EXPIRY" | "SIGNAL_TYPE_DATABASE_AUDITING_DISABLED" | "SIGNAL_TYPE_RESTRICT_AUTHORIZED_NETWORKS" | "SIGNAL_TYPE_VIOLATE_POLICY_RESTRICT_PUBLIC_IP" | "SIGNAL_TYPE_QUOTA_LIMIT" | "SIGNAL_TYPE_NO_PASSWORD_POLICY" | "SIGNAL_TYPE_CONNECTIONS_PERFORMANCE_IMPACT" | "SIGNAL_TYPE_TMP_TABLES_PERFORMANCE_IMPACT" | "SIGNAL_TYPE_TRANS_LOGS_PERFORMANCE_IMPACT" | "SIGNAL_TYPE_HIGH_JOINS_WITHOUT_INDEXES" | "SIGNAL_TYPE_SUPERUSER_WRITING_TO_USER_TABLES" | "SIGNAL_TYPE_USER_GRANTED_ALL_PERMISSIONS" | "SIGNAL_TYPE_DATA_EXPORT_TO_EXTERNAL_CLOUD_STORAGE_BUCKET" | "SIGNAL_TYPE_DATA_EXPORT_TO_PUBLIC_CLOUD_STORAGE_BUCKET"; state?: | "STATE_UNSPECIFIED" | "ACTIVE" | "RESOLVED" | "MUTED"; } function serializeDatabaseResourceHealthSignalData(data: any): 
DatabaseResourceHealthSignalData { return { ...data, eventTime: data["eventTime"] !== undefined ? data["eventTime"].toISOString() : undefined, }; } function deserializeDatabaseResourceHealthSignalData(data: any): DatabaseResourceHealthSignalData { return { ...data, eventTime: data["eventTime"] !== undefined ? new Date(data["eventTime"]) : undefined, }; } /** * DatabaseResourceId will serve as primary key for any resource ingestion * event. */ export interface DatabaseResourceId { /** * Required. Cloud provider name. Ex: GCP/AWS/Azure/OnPrem/SelfManaged */ provider?: | "PROVIDER_UNSPECIFIED" | "GCP" | "AWS" | "AZURE" | "ONPREM" | "SELFMANAGED" | "PROVIDER_OTHER"; /** * Optional. Needs to be used only when the provider is PROVIDER_OTHER. */ providerDescription?: string; /** * Required. The type of resource this ID is identifying. Ex * redis.googleapis.com/Instance, redis.googleapis.com/Cluster, * alloydb.googleapis.com/Cluster, alloydb.googleapis.com/Instance, * spanner.googleapis.com/Instance, spanner.googleapis.com/Database, * firestore.googleapis.com/Database, sqladmin.googleapis.com/Instance, * bigtableadmin.googleapis.com/Cluster, bigtableadmin.googleapis.com/Instance * REQUIRED Please refer go/condor-common-datamodel */ resourceType?: string; /** * Required. A service-local token that distinguishes this resource from * other resources within the same service. */ uniqueId?: string; } /** * Common model for database resource instance metadata. Next ID: 23 */ export interface DatabaseResourceMetadata { /** * Availability configuration for this instance */ availabilityConfiguration?: AvailabilityConfiguration; /** * Backup configuration for this instance */ backupConfiguration?: BackupConfiguration; /** * Latest backup run information for this instance */ backupRun?: BackupRun; /** * The creation time of the resource, i.e. the time when resource is created * and recorded in partner service. */ creationTime?: Date; /** * Current state of the instance. */ currentState?: | "STATE_UNSPECIFIED" | "HEALTHY" | "UNHEALTHY" | "SUSPENDED" | "DELETED" | "STATE_OTHER"; /** * Any custom metadata associated with the resource */ customMetadata?: CustomMetadataData; /** * Optional. Edition represents whether the instance is ENTERPRISE or * ENTERPRISE_PLUS. This information is core to Cloud SQL only and is used to * identify the edition of the instance. */ edition?: | "EDITION_UNSPECIFIED" | "EDITION_ENTERPRISE" | "EDITION_ENTERPRISE_PLUS"; /** * Entitlements associated with the resource */ entitlements?: Entitlement[]; /** * The state that the instance is expected to be in. For example, an instance * state can transition to UNHEALTHY due to wrong patch update, while the * expected state will remain at the HEALTHY. */ expectedState?: | "STATE_UNSPECIFIED" | "HEALTHY" | "UNHEALTHY" | "SUSPENDED" | "DELETED" | "STATE_OTHER"; /** * Required. Unique identifier for a Database resource */ id?: DatabaseResourceId; /** * The type of the instance. Specified at creation time. */ instanceType?: | "INSTANCE_TYPE_UNSPECIFIED" | "SUB_RESOURCE_TYPE_UNSPECIFIED" | "PRIMARY" | "SECONDARY" | "READ_REPLICA" | "OTHER" | "SUB_RESOURCE_TYPE_PRIMARY" | "SUB_RESOURCE_TYPE_SECONDARY" | "SUB_RESOURCE_TYPE_READ_REPLICA" | "SUB_RESOURCE_TYPE_OTHER"; /** * The resource location. REQUIRED */ location?: string; /** * Machine configuration for this resource. 
*/ machineConfiguration?: MachineConfiguration; /** * Identifier for this resource's immediate parent/primary resource if the * current resource is a replica or derived form of another Database resource. * Else it would be NULL. REQUIRED if the immediate parent exists when first * time resource is getting ingested, otherwise optional. */ primaryResourceId?: DatabaseResourceId; /** * Primary resource location. REQUIRED if the immediate parent exists when * first time resource is getting ingested, otherwise optional. */ primaryResourceLocation?: string; /** * The product this resource represents. */ product?: Product; /** * Closest parent Cloud Resource Manager container of this resource. It must * be resource name of a Cloud Resource Manager project with the format of * "/", such as "projects/123". For GCP provided resources, number should be * project number. */ resourceContainer?: string; /** * Required. Different from DatabaseResourceId.unique_id, a resource name can * be reused over time. That is, after a resource named "ABC" is deleted, the * name "ABC" can be used to to create a new resource within the same source. * Resource name to follow CAIS resource_name format as noted here * go/condor-common-datamodel */ resourceName?: string; /** * Optional. Tags associated with this resources. */ tagsSet?: Tags; /** * The time at which the resource was updated and recorded at partner * service. */ updationTime?: Date; /** * User-provided labels associated with the resource */ userLabelSet?: UserLabels; } function serializeDatabaseResourceMetadata(data: any): DatabaseResourceMetadata { return { ...data, backupConfiguration: data["backupConfiguration"] !== undefined ? serializeBackupConfiguration(data["backupConfiguration"]) : undefined, backupRun: data["backupRun"] !== undefined ? serializeBackupRun(data["backupRun"]) : undefined, creationTime: data["creationTime"] !== undefined ? data["creationTime"].toISOString() : undefined, customMetadata: data["customMetadata"] !== undefined ? serializeCustomMetadataData(data["customMetadata"]) : undefined, machineConfiguration: data["machineConfiguration"] !== undefined ? serializeMachineConfiguration(data["machineConfiguration"]) : undefined, updationTime: data["updationTime"] !== undefined ? data["updationTime"].toISOString() : undefined, }; } function deserializeDatabaseResourceMetadata(data: any): DatabaseResourceMetadata { return { ...data, backupConfiguration: data["backupConfiguration"] !== undefined ? deserializeBackupConfiguration(data["backupConfiguration"]) : undefined, backupRun: data["backupRun"] !== undefined ? deserializeBackupRun(data["backupRun"]) : undefined, creationTime: data["creationTime"] !== undefined ? new Date(data["creationTime"]) : undefined, customMetadata: data["customMetadata"] !== undefined ? deserializeCustomMetadataData(data["customMetadata"]) : undefined, machineConfiguration: data["machineConfiguration"] !== undefined ? deserializeMachineConfiguration(data["machineConfiguration"]) : undefined, updationTime: data["updationTime"] !== undefined ? new Date(data["updationTime"]) : undefined, }; } /** * Common model for database resource recommendation signal data. */ export interface DatabaseResourceRecommendationSignalData { /** * Optional. Any other additional metadata specific to recommendation */ additionalMetadata?: { [key: string]: any }; /** * Required. last time recommendationw as refreshed */ lastRefreshTime?: Date; /** * Required. 
Recommendation state */ recommendationState?: | "UNSPECIFIED" | "ACTIVE" | "CLAIMED" | "SUCCEEDED" | "FAILED" | "DISMISSED"; /** * Required. Name of recommendation. Examples: * organizations/1234/locations/us-central1/recommenders/google.cloudsql.instance.PerformanceRecommender/recommendations/9876 */ recommender?: string; /** * Required. ID of recommender. Examples: * "google.cloudsql.instance.PerformanceRecommender" */ recommenderId?: string; /** * Required. Contains an identifier for a subtype of recommendations produced * for the same recommender. Subtype is a function of content and impact, * meaning a new subtype might be added when significant changes to `content` * or `primary_impact.category` are introduced. See the Recommenders section * to see a list of subtypes for a given Recommender. Examples: For * recommender = "google.cloudsql.instance.PerformanceRecommender", * recommender_subtype can be * "MYSQL_HIGH_NUMBER_OF_OPEN_TABLES_BEST_PRACTICE"/"POSTGRES_HIGH_TRANSACTION_ID_UTILIZATION_BEST_PRACTICE" */ recommenderSubtype?: string; /** * Required. Database resource name associated with the signal. Resource name * to follow CAIS resource_name format as noted here * go/condor-common-datamodel */ resourceName?: string; /** * Required. Type of signal, for example, `SIGNAL_TYPE_IDLE`, * `SIGNAL_TYPE_HIGH_NUMBER_OF_TABLES`, etc. */ signalType?: | "SIGNAL_TYPE_UNSPECIFIED" | "SIGNAL_TYPE_NOT_PROTECTED_BY_AUTOMATIC_FAILOVER" | "SIGNAL_TYPE_GROUP_NOT_REPLICATING_ACROSS_REGIONS" | "SIGNAL_TYPE_NOT_AVAILABLE_IN_MULTIPLE_ZONES" | "SIGNAL_TYPE_NOT_AVAILABLE_IN_MULTIPLE_REGIONS" | "SIGNAL_TYPE_NO_PROMOTABLE_REPLICA" | "SIGNAL_TYPE_NO_AUTOMATED_BACKUP_POLICY" | "SIGNAL_TYPE_SHORT_BACKUP_RETENTION" | "SIGNAL_TYPE_LAST_BACKUP_FAILED" | "SIGNAL_TYPE_LAST_BACKUP_OLD" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_2_0" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_3" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_2" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_1" | "SIGNAL_TYPE_VIOLATES_CIS_GCP_FOUNDATION_1_0" | "SIGNAL_TYPE_VIOLATES_CIS_CONTROLS_V8_0" | "SIGNAL_TYPE_VIOLATES_NIST_800_53" | "SIGNAL_TYPE_VIOLATES_NIST_800_53_R5" | "SIGNAL_TYPE_VIOLATES_NIST_CYBERSECURITY_FRAMEWORK_V1_0" | "SIGNAL_TYPE_VIOLATES_ISO_27001" | "SIGNAL_TYPE_VIOLATES_ISO_27001_V2022" | "SIGNAL_TYPE_VIOLATES_PCI_DSS_V3_2_1" | "SIGNAL_TYPE_VIOLATES_PCI_DSS_V4_0" | "SIGNAL_TYPE_VIOLATES_CLOUD_CONTROLS_MATRIX_V4" | "SIGNAL_TYPE_VIOLATES_HIPAA" | "SIGNAL_TYPE_VIOLATES_SOC2_V2017" | "SIGNAL_TYPE_LOGS_NOT_OPTIMIZED_FOR_TROUBLESHOOTING" | "SIGNAL_TYPE_QUERY_DURATIONS_NOT_LOGGED" | "SIGNAL_TYPE_VERBOSE_ERROR_LOGGING" | "SIGNAL_TYPE_QUERY_LOCK_WAITS_NOT_LOGGED" | "SIGNAL_TYPE_LOGGING_MOST_ERRORS" | "SIGNAL_TYPE_LOGGING_ONLY_CRITICAL_ERRORS" | "SIGNAL_TYPE_MINIMAL_ERROR_LOGGING" | "SIGNAL_TYPE_QUERY_STATISTICS_LOGGED" | "SIGNAL_TYPE_EXCESSIVE_LOGGING_OF_CLIENT_HOSTNAME" | "SIGNAL_TYPE_EXCESSIVE_LOGGING_OF_PARSER_STATISTICS" | "SIGNAL_TYPE_EXCESSIVE_LOGGING_OF_PLANNER_STATISTICS" | "SIGNAL_TYPE_NOT_LOGGING_ONLY_DDL_STATEMENTS" | "SIGNAL_TYPE_LOGGING_QUERY_STATISTICS" | "SIGNAL_TYPE_NOT_LOGGING_TEMPORARY_FILES" | "SIGNAL_TYPE_CONNECTION_MAX_NOT_CONFIGURED" | "SIGNAL_TYPE_USER_OPTIONS_CONFIGURED" | "SIGNAL_TYPE_EXPOSED_TO_PUBLIC_ACCESS" | "SIGNAL_TYPE_UNENCRYPTED_CONNECTIONS" | "SIGNAL_TYPE_NO_ROOT_PASSWORD" | "SIGNAL_TYPE_WEAK_ROOT_PASSWORD" | "SIGNAL_TYPE_ENCRYPTION_KEY_NOT_CUSTOMER_MANAGED" | "SIGNAL_TYPE_SERVER_AUTHENTICATION_NOT_REQUIRED" | "SIGNAL_TYPE_EXPOSED_BY_OWNERSHIP_CHAINING" | "SIGNAL_TYPE_EXPOSED_TO_EXTERNAL_SCRIPTS" | 
"SIGNAL_TYPE_EXPOSED_TO_LOCAL_DATA_LOADS" | "SIGNAL_TYPE_CONNECTION_ATTEMPTS_NOT_LOGGED" | "SIGNAL_TYPE_DISCONNECTIONS_NOT_LOGGED" | "SIGNAL_TYPE_LOGGING_EXCESSIVE_STATEMENT_INFO" | "SIGNAL_TYPE_EXPOSED_TO_REMOTE_ACCESS" | "SIGNAL_TYPE_DATABASE_NAMES_EXPOSED" | "SIGNAL_TYPE_SENSITIVE_TRACE_INFO_NOT_MASKED" | "SIGNAL_TYPE_PUBLIC_IP_ENABLED" | "SIGNAL_TYPE_IDLE" | "SIGNAL_TYPE_OVERPROVISIONED" | "SIGNAL_TYPE_HIGH_NUMBER_OF_OPEN_TABLES" | "SIGNAL_TYPE_HIGH_NUMBER_OF_TABLES" | "SIGNAL_TYPE_HIGH_TRANSACTION_ID_UTILIZATION" | "SIGNAL_TYPE_UNDERPROVISIONED" | "SIGNAL_TYPE_OUT_OF_DISK" | "SIGNAL_TYPE_SERVER_CERTIFICATE_NEAR_EXPIRY" | "SIGNAL_TYPE_DATABASE_AUDITING_DISABLED" | "SIGNAL_TYPE_RESTRICT_AUTHORIZED_NETWORKS" | "SIGNAL_TYPE_VIOLATE_POLICY_RESTRICT_PUBLIC_IP" | "SIGNAL_TYPE_QUOTA_LIMIT" | "SIGNAL_TYPE_NO_PASSWORD_POLICY" | "SIGNAL_TYPE_CONNECTIONS_PERFORMANCE_IMPACT" | "SIGNAL_TYPE_TMP_TABLES_PERFORMANCE_IMPACT" | "SIGNAL_TYPE_TRANS_LOGS_PERFORMANCE_IMPACT" | "SIGNAL_TYPE_HIGH_JOINS_WITHOUT_INDEXES" | "SIGNAL_TYPE_SUPERUSER_WRITING_TO_USER_TABLES" | "SIGNAL_TYPE_USER_GRANTED_ALL_PERMISSIONS" | "SIGNAL_TYPE_DATA_EXPORT_TO_EXTERNAL_CLOUD_STORAGE_BUCKET" | "SIGNAL_TYPE_DATA_EXPORT_TO_PUBLIC_CLOUD_STORAGE_BUCKET"; } function serializeDatabaseResourceRecommendationSignalData(data: any): DatabaseResourceRecommendationSignalData { return { ...data, lastRefreshTime: data["lastRefreshTime"] !== undefined ? data["lastRefreshTime"].toISOString() : undefined, }; } function deserializeDatabaseResourceRecommendationSignalData(data: any): DatabaseResourceRecommendationSignalData { return { ...data, lastRefreshTime: data["lastRefreshTime"] !== undefined ? new Date(data["lastRefreshTime"]) : undefined, }; } /** * Endpoints on each network, for Redis clients to connect to the cluster. */ export interface DiscoveryEndpoint { /** * Output only. Address of the exposed Redis endpoint used by clients to * connect to the service. The address could be either IP or hostname. */ readonly address?: string; /** * Output only. The port number of the exposed Redis endpoint. */ readonly port?: number; /** * Output only. Customer configuration for where the endpoint is created and * accessed from. */ readonly pscConfig?: PscConfig; } /** * A generic empty message that you can re-use to avoid defining duplicated * empty messages in your APIs. A typical example is to use it as the request or * the response type of an API method. For instance: service Foo { rpc * Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } */ export interface Empty { } /** * Proto representing the access that a user has to a specific feature/service. * NextId: 3. */ export interface Entitlement { /** * The current state of user's accessibility to a feature/benefit. */ entitlementState?: | "ENTITLEMENT_STATE_UNSPECIFIED" | "ENTITLED" | "REVOKED"; /** * An enum that represents the type of this entitlement. */ type?: | "ENTITLEMENT_TYPE_UNSPECIFIED" | "GEMINI"; } /** * Request for [ExportBackup]. */ export interface ExportBackupRequest { /** * Google Cloud Storage bucket, like "my-bucket". */ gcsBucket?: string; } /** * Request for Export. */ export interface ExportInstanceRequest { /** * Required. Specify data to be exported. */ outputConfig?: OutputConfig; } /** * Request for Failover. */ export interface FailoverInstanceRequest { /** * Optional. Available data protection modes that the user can choose. If * it's unspecified, data protection mode will be LIMITED_DATA_LOSS by * default. 
*/ dataProtectionMode?: | "DATA_PROTECTION_MODE_UNSPECIFIED" | "LIMITED_DATA_LOSS" | "FORCE_DATA_LOSS"; } /** * This schedule allows the backup to be triggered at a fixed frequency * (currently only daily is supported). */ export interface FixedFrequencySchedule { /** * Optional. The start time of every automated backup in UTC. It must be set * to the start of an hour. If not specified, the default value is the start * of the hour when the automated backup config is enabled. For example, if * the automated backup config is enabled at 10:13 AM UTC without specifying * start_time, the default start time is 10:00 AM UTC. */ startTime?: TimeOfDay; } /** * Backups stored in Cloud Storage buckets. The Cloud Storage buckets need to * be the same region as the clusters. */ export interface GcsBackupSource { /** * Optional. URIs of the GCS objects to import. Example: * gs://bucket1/object1, gs://bucket2/folder2/object2 */ uris?: string[]; } /** * The Cloud Storage location for the output content */ export interface GcsDestination { /** * Required. Data destination URI (e.g. 'gs://my_bucket/my_object'). Existing * files will be overwritten. */ uri?: string; } /** * The Cloud Storage location for the input content */ export interface GcsSource { /** * Required. Source data URI. (e.g. 'gs://my_bucket/my_object'). */ uri?: string; } /** * This location metadata represents additional configuration options for a * given location where a Redis instance may be created. All fields are output * only. It is returned as content of the * `google.cloud.location.Location.metadata` field. */ export interface GoogleCloudRedisV1LocationMetadata { /** * Output only. The set of available zones in the location. The map is keyed * by the lowercase ID of each zone, as defined by GCE. These keys can be * specified in `location_id` or `alternative_location_id` fields when * creating a Redis instance. */ readonly availableZones?: { [key: string]: GoogleCloudRedisV1ZoneMetadata }; } /** * Represents the v1 metadata of the long-running operation. */ export interface GoogleCloudRedisV1OperationMetadata { /** * API version. */ apiVersion?: string; /** * Specifies if cancellation was requested for the operation. */ cancelRequested?: boolean; /** * Creation timestamp. */ createTime?: Date; /** * End timestamp. */ endTime?: Date; /** * Operation status details. */ statusDetail?: string; /** * Operation target. */ target?: string; /** * Operation verb. */ verb?: string; } function serializeGoogleCloudRedisV1OperationMetadata(data: any): GoogleCloudRedisV1OperationMetadata { return { ...data, createTime: data["createTime"] !== undefined ? data["createTime"].toISOString() : undefined, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, }; } function deserializeGoogleCloudRedisV1OperationMetadata(data: any): GoogleCloudRedisV1OperationMetadata { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, }; } /** * Defines specific information for a particular zone. Currently empty and * reserved for future use only. */ export interface GoogleCloudRedisV1ZoneMetadata { } /** * Request for Import. */ export interface ImportInstanceRequest { /** * Required. Specify data to be imported. */ inputConfig?: InputConfig; } /** * The input content */ export interface InputConfig { /** * Google Cloud Storage location where input content is located. 
*/ gcsSource?: GcsSource; } /** * A Memorystore for Redis instance. */ export interface Instance { /** * Optional. If specified, at least one node will be provisioned in this zone * in addition to the zone specified in location_id. Only applicable to * standard tier. If provided, it must be a different zone from the one * provided in [location_id]. Additional nodes beyond the first 2 will be * placed in zones selected by the service. */ alternativeLocationId?: string; /** * Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If * set to "true" AUTH is enabled on the instance. Default value is "false" * meaning AUTH is disabled. */ authEnabled?: boolean; /** * Optional. The full name of the Google Compute Engine * [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is * connected. If left unspecified, the `default` network will be used. */ authorizedNetwork?: string; /** * Optional. The available maintenance versions that an instance could update * to. */ availableMaintenanceVersions?: string[]; /** * Optional. The network connect mode of the Redis instance. If not provided, * the connect mode defaults to DIRECT_PEERING. */ connectMode?: | "CONNECT_MODE_UNSPECIFIED" | "DIRECT_PEERING" | "PRIVATE_SERVICE_ACCESS"; /** * Output only. The time the instance was created. */ readonly createTime?: Date; /** * Output only. The current zone where the Redis primary node is located. In * basic tier, this will always be the same as [location_id]. In standard * tier, this can be the zone of any node in the instance. */ readonly currentLocationId?: string; /** * Optional. The KMS key reference that the customer provides when trying to * create the instance. */ customerManagedKey?: string; /** * An arbitrary and optional user-provided name for the instance. */ displayName?: string; /** * Output only. Hostname or IP address of the exposed Redis endpoint used by * clients to connect to the service. */ readonly host?: string; /** * Resource labels to represent user provided metadata */ labels?: { [key: string]: string }; /** * Optional. The zone where the instance will be provisioned. If not * provided, the service will choose a zone from the specified region for the * instance. For standard tier, additional nodes will be added across multiple * zones for protection against zonal failures. If specified, at least one * node will be provisioned in this zone. */ locationId?: string; /** * Optional. The maintenance policy for the instance. If not provided, * maintenance events can be performed at any time. */ maintenancePolicy?: MaintenancePolicy; /** * Output only. Date and time of upcoming maintenance events which have been * scheduled. */ readonly maintenanceSchedule?: MaintenanceSchedule; /** * Optional. The self service update maintenance version. The version is date * based such as "20210712_00_00". */ maintenanceVersion?: string; /** * Required. Redis memory size in GiB. */ memorySizeGb?: number; /** * Required. Unique name of the resource in this scope including project and * location using the form: * `projects/{project_id}/locations/{location_id}/instances/{instance_id}` * Note: Redis instances are managed and addressed at regional level so * location_id here refers to a GCP region; however, users may choose which * specific zone (or collection of zones for cross-zone instances) an instance * should be provisioned in. Refer to location_id and alternative_location_id * fields for more details. */ name?: string; /** * Output only. Info per node. 
*/ readonly nodes?: NodeInfo[]; /** * Optional. Persistence configuration parameters */ persistenceConfig?: PersistenceConfig; /** * Output only. Cloud IAM identity used by import / export operations to * transfer data to/from Cloud Storage. Format is "serviceAccount:". The value * may change over time for a given instance so should be checked before each * import/export operation. */ readonly persistenceIamIdentity?: string; /** * Output only. The port number of the exposed Redis endpoint. */ readonly port?: number; /** * Output only. Hostname or IP address of the exposed readonly Redis * endpoint. Standard tier only. Targets all healthy replica nodes in * instance. Replication is asynchronous and replica nodes will exhibit some * lag behind the primary. Write requests must target 'host'. */ readonly readEndpoint?: string; /** * Output only. The port number of the exposed readonly redis endpoint. * Standard tier only. Write requests should target 'port'. */ readonly readEndpointPort?: number; /** * Optional. Read replicas mode for the instance. Defaults to * READ_REPLICAS_DISABLED. */ readReplicasMode?: | "READ_REPLICAS_MODE_UNSPECIFIED" | "READ_REPLICAS_DISABLED" | "READ_REPLICAS_ENABLED"; /** * Optional. Redis configuration parameters, according to * http://redis.io/topics/config. Currently, the only supported parameters * are: Redis version 3.2 and newer: * maxmemory-policy * * notify-keyspace-events Redis version 4.0 and newer: * activedefrag * * lfu-decay-time * lfu-log-factor * maxmemory-gb Redis version 5.0 and newer: * * stream-node-max-bytes * stream-node-max-entries */ redisConfigs?: { [key: string]: string }; /** * Optional. The version of Redis software. If not provided, latest supported * version will be used. Currently, the supported values are: * `REDIS_3_2` * for Redis 3.2 compatibility * `REDIS_4_0` for Redis 4.0 compatibility * (default) * `REDIS_5_0` for Redis 5.0 compatibility * `REDIS_6_X` for Redis * 6.x compatibility * `REDIS_7_0` for Redis 7.0 compatibility */ redisVersion?: string; /** * Optional. The number of replica nodes. The valid range for the Standard * Tier with read replicas enabled is [1-5] and defaults to 2. If read * replicas are not enabled for a Standard Tier instance, the only valid value * is 1 and the default is 1. The valid value for basic tier is 0 and the * default is also 0. */ replicaCount?: number; /** * Optional. For DIRECT_PEERING mode, the CIDR range of internal addresses * that are reserved for this instance. Range must be unique and * non-overlapping with existing subnets in an authorized network. For * PRIVATE_SERVICE_ACCESS mode, the name of one allocated IP address ranges * associated with this private service access connection. If not provided, * the service will choose an unused /29 block, for example, 10.0.0.0/29 or * 192.168.0.0/29. For READ_REPLICAS_ENABLED the default block size is /28. */ reservedIpRange?: string; /** * Optional. Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Optional. Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Optional. Additional IP range for node placement. Required when enabling * read replicas on an existing instance. For DIRECT_PEERING mode value must * be a CIDR range of size /28, or "auto". For PRIVATE_SERVICE_ACCESS mode * value must be the name of an allocated address range associated with the * private service access connection, or "auto". */ secondaryIpRange?: string; /** * Output only. 
List of server CA certificates for the instance. */ readonly serverCaCerts?: TlsCertificate[]; /** * Output only. The current state of this instance. */ readonly state?: | "STATE_UNSPECIFIED" | "CREATING" | "READY" | "UPDATING" | "DELETING" | "REPAIRING" | "MAINTENANCE" | "IMPORTING" | "FAILING_OVER"; /** * Output only. Additional information about the current status of this * instance, if available. */ readonly statusMessage?: string; /** * Optional. Reasons that cause the instance to be in the "SUSPENDED" state. */ suspensionReasons?: | "SUSPENSION_REASON_UNSPECIFIED" | "CUSTOMER_MANAGED_KEY_ISSUE"[]; /** * Required. The service tier of the instance. */ tier?: | "TIER_UNSPECIFIED" | "BASIC" | "STANDARD_HA"; /** * Optional. The TLS mode of the Redis instance. If not provided, TLS is * disabled for the instance. */ transitEncryptionMode?: | "TRANSIT_ENCRYPTION_MODE_UNSPECIFIED" | "SERVER_AUTHENTICATION" | "DISABLED"; } function serializeInstance(data: any): Instance { return { ...data, persistenceConfig: data["persistenceConfig"] !== undefined ? serializePersistenceConfig(data["persistenceConfig"]) : undefined, }; } function deserializeInstance(data: any): Instance { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, persistenceConfig: data["persistenceConfig"] !== undefined ? deserializePersistenceConfig(data["persistenceConfig"]) : undefined, }; } /** * Instance AUTH string details. */ export interface InstanceAuthString { /** * AUTH string set on the instance. */ authString?: string; } /** * Metadata for individual internal resources in an instance. For example, a * Spanner instance can have multiple databases with unique configuration settings. * Similarly, Bigtable can have multiple clusters within the same Bigtable instance. */ export interface InternalResourceMetadata { /** * Backup configuration for this database */ backupConfiguration?: BackupConfiguration; /** * Information about the last backup attempt for this database */ backupRun?: BackupRun; product?: Product; resourceId?: DatabaseResourceId; /** * Required. Internal resource name; for Spanner this will be the database name, * e.g. "spanner.googleapis.com/projects/123/abc/instances/inst1/databases/db1" */ resourceName?: string; } function serializeInternalResourceMetadata(data: any): InternalResourceMetadata { return { ...data, backupConfiguration: data["backupConfiguration"] !== undefined ? serializeBackupConfiguration(data["backupConfiguration"]) : undefined, backupRun: data["backupRun"] !== undefined ? serializeBackupRun(data["backupRun"]) : undefined, }; } function deserializeInternalResourceMetadata(data: any): InternalResourceMetadata { return { ...data, backupConfiguration: data["backupConfiguration"] !== undefined ? deserializeBackupConfiguration(data["backupConfiguration"]) : undefined, backupRun: data["backupRun"] !== undefined ? deserializeBackupRun(data["backupRun"]) : undefined, }; } /** * Response for [ListBackupCollections]. */ export interface ListBackupCollectionsResponse { /** * A list of backupCollections in the project. If the `location_id` in the * parent field of the request is "-", all regions available to the project * are queried, and the results aggregated. 
If in such an aggregated query a * location is unavailable, a placeholder backupCollection entry is included * in the response with the `name` field set to a value of the form * `projects/{project_id}/locations/{location_id}/backupCollections/`- and the * `status` field set to ERROR and `status_message` field set to "location not * available for ListBackupCollections". */ backupCollections?: BackupCollection[]; /** * Token to retrieve the next page of results, or empty if there are no more * results in the list. */ nextPageToken?: string; /** * Locations that could not be reached. */ unreachable?: string[]; } /** * Response for [ListBackups]. */ export interface ListBackupsResponse { /** * A list of backups in the project. */ backups?: Backup[]; /** * Token to retrieve the next page of results, or empty if there are no more * results in the list. */ nextPageToken?: string; /** * Backups that could not be reached. */ unreachable?: string[]; } /** * Response for ListClusters. */ export interface ListClustersResponse { /** * A list of Redis clusters in the project in the specified location, or * across all locations. If the `location_id` in the parent field of the * request is "-", all regions available to the project are queried, and the * results aggregated. If in such an aggregated query a location is * unavailable, a placeholder Redis entry is included in the response with the * `name` field set to a value of the form * `projects/{project_id}/locations/{location_id}/clusters/`- and the `status` * field set to ERROR and `status_message` field set to "location not * available for ListClusters". */ clusters?: Cluster[]; /** * Token to retrieve the next page of results, or empty if there are no more * results in the list. */ nextPageToken?: string; /** * Locations that could not be reached. */ unreachable?: string[]; } function serializeListClustersResponse(data: any): ListClustersResponse { return { ...data, clusters: data["clusters"] !== undefined ? data["clusters"].map((item: any) => (serializeCluster(item))) : undefined, }; } function deserializeListClustersResponse(data: any): ListClustersResponse { return { ...data, clusters: data["clusters"] !== undefined ? data["clusters"].map((item: any) => (deserializeCluster(item))) : undefined, }; } /** * Response for ListInstances. */ export interface ListInstancesResponse { /** * A list of Redis instances in the project in the specified location, or * across all locations. If the `location_id` in the parent field of the * request is "-", all regions available to the project are queried, and the * results aggregated. If in such an aggregated query a location is * unavailable, a placeholder Redis entry is included in the response with the * `name` field set to a value of the form * `projects/{project_id}/locations/{location_id}/instances/`- and the * `status` field set to ERROR and `status_message` field set to "location not * available for ListInstances". */ instances?: Instance[]; /** * Token to retrieve the next page of results, or empty if there are no more * results in the list. */ nextPageToken?: string; /** * Locations that could not be reached. */ unreachable?: string[]; } function serializeListInstancesResponse(data: any): ListInstancesResponse { return { ...data, instances: data["instances"] !== undefined ? data["instances"].map((item: any) => (serializeInstance(item))) : undefined, }; } function deserializeListInstancesResponse(data: any): ListInstancesResponse { return { ...data, instances: data["instances"] !== undefined ? 
data["instances"].map((item: any) => (deserializeInstance(item))) : undefined, }; } /** * The response message for Locations.ListLocations. */ export interface ListLocationsResponse { /** * A list of locations that matches the specified filter in the request. */ locations?: Location[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * The response message for Operations.ListOperations. */ export interface ListOperationsResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of operations that matches the specified filter in the request. */ operations?: Operation[]; } /** * A resource that represents a Google Cloud location. */ export interface Location { /** * The friendly name for this location, typically a nearby city name. For * example, "Tokyo". */ displayName?: string; /** * Cross-service attributes for the location. For example * {"cloud.googleapis.com/region": "us-east1"} */ labels?: { [key: string]: string }; /** * Resource ID for the region. For example: "us-east1". */ locationId?: string; /** * Output only. The set of available zones in the location. The map is keyed * by the lowercase ID of each zone, as defined by Compute Engine. These keys * can be specified in `location_id` or `alternative_location_id` fields when * creating a Redis instance. */ metadata?: { [key: string]: any }; /** * Full resource name for the region. For example: * "projects/example-project/locations/us-east1". */ name?: string; } /** * MachineConfiguration describes the configuration of a machine specific to * Database Resource. */ export interface MachineConfiguration { /** * The number of CPUs. TODO(b/342344482, b/342346271) add proto validations * again after bug fix. */ cpuCount?: number; /** * Memory size in bytes. TODO(b/342344482, b/342346271) add proto validations * again after bug fix. */ memorySizeInBytes?: bigint; /** * Optional. Number of shards (if applicable). */ shardCount?: number; /** * Optional. The number of vCPUs. TODO(b/342344482, b/342346271) add proto * validations again after bug fix. */ vcpuCount?: number; } function serializeMachineConfiguration(data: any): MachineConfiguration { return { ...data, memorySizeInBytes: data["memorySizeInBytes"] !== undefined ? String(data["memorySizeInBytes"]) : undefined, }; } function deserializeMachineConfiguration(data: any): MachineConfiguration { return { ...data, memorySizeInBytes: data["memorySizeInBytes"] !== undefined ? BigInt(data["memorySizeInBytes"]) : undefined, }; } /** * Maintenance policy for an instance. */ export interface MaintenancePolicy { /** * Output only. The time when the policy was created. */ readonly createTime?: Date; /** * Optional. Description of what this policy is for. Create/Update methods * return INVALID_ARGUMENT if the length is greater than 512. */ description?: string; /** * Output only. The time when the policy was last updated. */ readonly updateTime?: Date; /** * Optional. Maintenance window that is applied to resources covered by this * policy. Minimum 1. For the current version, the maximum number of * weekly_window is expected to be one. */ weeklyMaintenanceWindow?: WeeklyMaintenanceWindow[]; } /** * Upcoming maintenance schedule. If no maintenance is scheduled, fields are * not populated. */ export interface MaintenanceSchedule { /** * If the scheduled maintenance can be rescheduled, default is true. */ canReschedule?: boolean; /** * Output only. The end time of any upcoming scheduled maintenance for this * instance. 
*/ readonly endTime?: Date; /** * Output only. The deadline that the maintenance schedule start time cannot * go beyond, including reschedule. */ readonly scheduleDeadlineTime?: Date; /** * Output only. The start time of any upcoming scheduled maintenance for this * instance. */ readonly startTime?: Date; } /** * Backups that are generated and managed by Memorystore. */ export interface ManagedBackupSource { /** * Optional. Example: * //redis.googleapis.com/projects/{project}/locations/{location}/backupCollections/{collection}/backups/{backup} * A shorter version (without the prefix) of the backup name is also * supported, like * projects/{project}/locations/{location}/backupCollections/{collection}/backups/{backup_id} * In this case, it assumes the backup is under redis.googleapis.com. */ backup?: string; } export interface ManagedCertificateAuthority { /** * The PEM encoded CA certificate chains for Redis managed server * authentication. */ caCerts?: CertChain[]; } /** * An output only view of all the member clusters participating in the cross * cluster replication. */ export interface Membership { /** * Output only. The primary cluster that acts as the source of replication * for the secondary clusters. */ readonly primaryCluster?: RemoteCluster; /** * Output only. The list of secondary clusters replicating from the primary * cluster. */ readonly secondaryClusters?: RemoteCluster[]; } /** * Node specific properties. */ export interface NodeInfo { /** * Output only. Node identifying string. e.g. 'node-0', 'node-1' */ readonly id?: string; /** * Output only. Location of the node. */ readonly zone?: string; } export interface ObservabilityMetricData { /** * Required. Type of aggregation performed on the metric. */ aggregationType?: | "AGGREGATION_TYPE_UNSPECIFIED" | "PEAK" | "P99" | "P95" | "CURRENT"; /** * Required. Type of metric like CPU, Memory, etc. */ metricType?: | "METRIC_TYPE_UNSPECIFIED" | "CPU_UTILIZATION" | "MEMORY_UTILIZATION" | "NETWORK_CONNECTIONS" | "STORAGE_UTILIZATION" | "STORAGE_USED_BYTES"; /** * Required. The time the metric value was observed. */ observationTime?: Date; /** * Required. Database resource name associated with the signal. Resource name * to follow CAIS resource_name format as noted here * go/condor-common-datamodel */ resourceName?: string; /** * Required. Value of the metric type. */ value?: TypedValue; } function serializeObservabilityMetricData(data: any): ObservabilityMetricData { return { ...data, observationTime: data["observationTime"] !== undefined ? data["observationTime"].toISOString() : undefined, value: data["value"] !== undefined ? serializeTypedValue(data["value"]) : undefined, }; } function deserializeObservabilityMetricData(data: any): ObservabilityMetricData { return { ...data, observationTime: data["observationTime"] !== undefined ? new Date(data["observationTime"]) : undefined, value: data["value"] !== undefined ? deserializeTypedValue(data["value"]) : undefined, }; } /** * This resource represents a long-running operation that is the result of a * network API call. */ export interface Operation { /** * If the value is `false`, it means the operation is still in progress. If * `true`, the operation is completed, and either `error` or `response` is * available. */ done?: boolean; /** * The error result of the operation in case of failure or cancellation. */ error?: Status; /** * { `createTime`: The time the operation was created. `endTime`: The time * the operation finished running. 
`target`: Server-defined resource path for * the target of the operation. `verb`: Name of the verb executed by the * operation. `statusDetail`: Human-readable status of the operation, if any. * `cancelRequested`: Identifies whether the user has requested cancellation * of the operation. Operations that have successfully been cancelled have * Operation.error value with a google.rpc.Status.code of 1, corresponding to * `Code.CANCELLED`. `apiVersion`: API version used to start the operation. } */ metadata?: { [key: string]: any }; /** * The server-assigned name, which is only unique within the same service * that originally returns it. If you use the default HTTP mapping, the `name` * should be a resource name ending with `operations/{unique_id}`. */ name?: string; /** * The normal, successful response of the operation. If the original method * returns no data on success, such as `Delete`, the response is * `google.protobuf.Empty`. If the original method is standard * `Get`/`Create`/`Update`, the response should be the resource. For other * methods, the response should have the type `XxxResponse`, where `Xxx` is * the original method name. For example, if the original method name is * `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. */ response?: { [key: string]: any }; } /** * An error that occurred during a backup creation operation. */ export interface OperationError { /** * Identifies the specific error that occurred. REQUIRED */ code?: string; errorType?: | "OPERATION_ERROR_TYPE_UNSPECIFIED" | "KMS_KEY_ERROR" | "DATABASE_ERROR" | "STOCKOUT_ERROR" | "CANCELLATION_ERROR" | "SQLSERVER_ERROR" | "INTERNAL_ERROR"; /** * Additional information about the error encountered. REQUIRED */ message?: string; } /** * Pre-defined metadata fields. */ export interface OperationMetadata { /** * Output only. API version used to start the operation. */ readonly apiVersion?: string; /** * Output only. The time the operation was created. */ readonly createTime?: Date; /** * Output only. The time the operation finished running. */ readonly endTime?: Date; /** * Output only. Identifies whether the user has requested cancellation of the * operation. Operations that have successfully been cancelled have * Operation.error value with a google.rpc.Status.code of 1, corresponding to * `Code.CANCELLED`. */ readonly requestedCancellation?: boolean; /** * Output only. Human-readable status of the operation, if any. */ readonly statusMessage?: string; /** * Output only. Server-defined resource path for the target of the operation. */ readonly target?: string; /** * Output only. Name of the verb executed by the operation. */ readonly verb?: string; } /** * The output content */ export interface OutputConfig { /** * Google Cloud Storage destination for output content. */ gcsDestination?: GcsDestination; } /** * Configuration of the persistence functionality. */ export interface PersistenceConfig { /** * Optional. Controls whether Persistence features are enabled. If not * provided, the existing value will be used. */ persistenceMode?: | "PERSISTENCE_MODE_UNSPECIFIED" | "DISABLED" | "RDB"; /** * Output only. The next time that a snapshot attempt is scheduled to occur. */ readonly rdbNextSnapshotTime?: Date; /** * Optional. Period between RDB snapshots. Snapshots will be attempted every * period starting from the provided snapshot start time. 
For example, a start * time of 01/01/2033 06:45 and SIX_HOURS snapshot period will do nothing * until 01/01/2033, and then trigger snapshots every day at 06:45, 12:45, * 18:45, and 00:45 the next day, and so on. If not provided, * TWENTY_FOUR_HOURS will be used as default. */ rdbSnapshotPeriod?: | "SNAPSHOT_PERIOD_UNSPECIFIED" | "ONE_HOUR" | "SIX_HOURS" | "TWELVE_HOURS" | "TWENTY_FOUR_HOURS"; /** * Optional. Date and time that the first snapshot was/will be attempted, and * to which future snapshots will be aligned. If not provided, the current * time will be used. */ rdbSnapshotStartTime?: Date; } function serializePersistenceConfig(data: any): PersistenceConfig { return { ...data, rdbSnapshotStartTime: data["rdbSnapshotStartTime"] !== undefined ? data["rdbSnapshotStartTime"].toISOString() : undefined, }; } function deserializePersistenceConfig(data: any): PersistenceConfig { return { ...data, rdbNextSnapshotTime: data["rdbNextSnapshotTime"] !== undefined ? new Date(data["rdbNextSnapshotTime"]) : undefined, rdbSnapshotStartTime: data["rdbSnapshotStartTime"] !== undefined ? new Date(data["rdbSnapshotStartTime"]) : undefined, }; } /** * Product specification for Condor resources. */ export interface Product { /** * The specific engine that the underlying database is running. */ engine?: | "ENGINE_UNSPECIFIED" | "ENGINE_MYSQL" | "MYSQL" | "ENGINE_POSTGRES" | "POSTGRES" | "ENGINE_SQL_SERVER" | "SQL_SERVER" | "ENGINE_NATIVE" | "NATIVE" | "ENGINE_CLOUD_SPANNER_WITH_POSTGRES_DIALECT" | "ENGINE_CLOUD_SPANNER_WITH_GOOGLESQL_DIALECT" | "ENGINE_MEMORYSTORE_FOR_REDIS" | "ENGINE_MEMORYSTORE_FOR_REDIS_CLUSTER" | "ENGINE_OTHER" | "ENGINE_FIRESTORE_WITH_NATIVE_MODE" | "ENGINE_FIRESTORE_WITH_DATASTORE_MODE"; /** * Type of specific database product. It could be CloudSQL, AlloyDB etc.. */ type?: | "PRODUCT_TYPE_UNSPECIFIED" | "PRODUCT_TYPE_CLOUD_SQL" | "CLOUD_SQL" | "PRODUCT_TYPE_ALLOYDB" | "ALLOYDB" | "PRODUCT_TYPE_SPANNER" | "PRODUCT_TYPE_ON_PREM" | "ON_PREM" | "PRODUCT_TYPE_MEMORYSTORE" | "PRODUCT_TYPE_BIGTABLE" | "PRODUCT_TYPE_OTHER" | "PRODUCT_TYPE_FIRESTORE"; /** * Version of the underlying database engine. Example values: For MySQL, it * could be "8.0", "5.7" etc.. For Postgres, it could be "14", "15" etc.. */ version?: string; } /** * Additional options for * Redis#projectsLocationsBackupCollectionsBackupsDelete. */ export interface ProjectsLocationsBackupCollectionsBackupsDeleteOptions { /** * Optional. Idempotent request UUID. */ requestId?: string; } /** * Additional options for Redis#projectsLocationsBackupCollectionsBackupsList. */ export interface ProjectsLocationsBackupCollectionsBackupsListOptions { /** * Optional. The maximum number of items to return. If not specified, a * default value of 1000 will be used by the service. Regardless of the * page_size value, the response may include a partial list and a caller * should only rely on response's `next_page_token` to determine if there are * more clusters left to be queried. */ pageSize?: number; /** * Optional. The `next_page_token` value returned from a previous * [ListBackupCollections] request, if any. */ pageToken?: string; } /** * Additional options for Redis#projectsLocationsBackupCollectionsList. */ export interface ProjectsLocationsBackupCollectionsListOptions { /** * Optional. The maximum number of items to return. If not specified, a * default value of 1000 will be used by the service. 
Regardless of the * page_size value, the response may include a partial list and a caller * should only rely on response's `next_page_token` to determine if there are * more clusters left to be queried. */ pageSize?: number; /** * Optional. The `next_page_token` value returned from a previous * [ListBackupCollections] request, if any. */ pageToken?: string; } /** * Additional options for Redis#projectsLocationsClustersCreate. */ export interface ProjectsLocationsClustersCreateOptions { /** * Required. The logical name of the Redis cluster in the customer project * with the following restrictions: * Must contain only lowercase letters, * numbers, and hyphens. * Must start with a letter. * Must be between 1-63 * characters. * Must end with a number or a letter. * Must be unique within * the customer project / location */ clusterId?: string; /** * Idempotent request UUID. */ requestId?: string; } /** * Additional options for Redis#projectsLocationsClustersDelete. */ export interface ProjectsLocationsClustersDeleteOptions { /** * Idempotent request UUID. */ requestId?: string; } /** * Additional options for Redis#projectsLocationsClustersList. */ export interface ProjectsLocationsClustersListOptions { /** * The maximum number of items to return. If not specified, a default value * of 1000 will be used by the service. Regardless of the page_size value, the * response may include a partial list and a caller should only rely on * response's `next_page_token` to determine if there are more clusters left * to be queried. */ pageSize?: number; /** * The `next_page_token` value returned from a previous ListClusters request, * if any. */ pageToken?: string; } /** * Additional options for Redis#projectsLocationsClustersPatch. */ export interface ProjectsLocationsClustersPatchOptions { /** * Idempotent request UUID. */ requestId?: string; /** * Required. Mask of fields to update. At least one path must be supplied in * this field. The elements of the repeated paths field may only include these * fields from Cluster: * `size_gb` * `replica_count` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsClustersPatchOptions(data: any): ProjectsLocationsClustersPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsClustersPatchOptions(data: any): ProjectsLocationsClustersPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for Redis#projectsLocationsInstancesCreate. */ export interface ProjectsLocationsInstancesCreateOptions { /** * Required. The logical name of the Redis instance in the customer project * with the following restrictions: * Must contain only lowercase letters, * numbers, and hyphens. * Must start with a letter. * Must be between 1-40 * characters. * Must end with a number or a letter. * Must be unique within * the customer project / location */ instanceId?: string; } /** * Additional options for Redis#projectsLocationsInstancesList. */ export interface ProjectsLocationsInstancesListOptions { /** * The maximum number of items to return. If not specified, a default value * of 1000 will be used by the service. Regardless of the page_size value, the * response may include a partial list and a caller should only rely on * response's `next_page_token` to determine if there are more instances left * to be queried. 
*/ pageSize?: number; /** * The `next_page_token` value returned from a previous ListInstances * request, if any. */ pageToken?: string; } /** * Additional options for Redis#projectsLocationsInstancesPatch. */ export interface ProjectsLocationsInstancesPatchOptions { /** * Required. Mask of fields to update. At least one path must be supplied in * this field. The elements of the repeated paths field may only include these * fields from Instance: * `displayName` * `labels` * `memorySizeGb` * * `redisConfig` * `replica_count` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsInstancesPatchOptions(data: any): ProjectsLocationsInstancesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsInstancesPatchOptions(data: any): ProjectsLocationsInstancesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for Redis#projectsLocationsList. */ export interface ProjectsLocationsListOptions { /** * A filter to narrow down results to a preferred subset. The filtering * language accepts strings like `"displayName=tokyo"`, and is documented in * more detail in [AIP-160](https://google.aip.dev/160). */ filter?: string; /** * The maximum number of results to return. If not set, the service selects a * default. */ pageSize?: number; /** * A page token received from the `next_page_token` field in the response. * Send that page token to receive the subsequent page. */ pageToken?: string; } /** * Additional options for Redis#projectsLocationsOperationsList. */ export interface ProjectsLocationsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } export interface PscConfig { /** * Required. The network where the IP address of the discovery endpoint will * be reserved, in the form of * projects/{network_project}/global/networks/{network_id}. */ network?: string; } /** * Details of consumer resources in a PSC connection. */ export interface PscConnection { /** * Required. The IP allocated on the consumer network for the PSC forwarding * rule. */ address?: string; /** * Output only. Type of the PSC connection. */ readonly connectionType?: | "CONNECTION_TYPE_UNSPECIFIED" | "CONNECTION_TYPE_DISCOVERY" | "CONNECTION_TYPE_PRIMARY" | "CONNECTION_TYPE_READER"; /** * Required. The URI of the consumer side forwarding rule. Example: * projects/{projectNumOrId}/regions/us-east1/forwardingRules/{resourceId}. */ forwardingRule?: string; /** * Required. The consumer network where the IP address resides, in the form * of projects/{project_id}/global/networks/{network_id}. */ network?: string; /** * Optional. Project ID of the consumer project where the forwarding rule is * created in. */ projectId?: string; /** * Required. The PSC connection id of the forwarding rule connected to the * service attachment. */ pscConnectionId?: string; /** * Output only. The status of the PSC connection. Please note that this value * is updated periodically. To get the latest status of a PSC connection, * follow * https://cloud.google.com/vpc/docs/configure-private-service-connect-services#endpoint-details. */ readonly pscConnectionStatus?: | "PSC_CONNECTION_STATUS_UNSPECIFIED" | "PSC_CONNECTION_STATUS_ACTIVE" | "PSC_CONNECTION_STATUS_NOT_FOUND"; /** * Required. 
The service attachment which is the target of the PSC * connection, in the form of * projects/{project-id}/regions/{region}/serviceAttachments/{service-attachment-id}. */ serviceAttachment?: string; } /** * Configuration of a service attachment of the cluster, for creating PSC * connections. */ export interface PscServiceAttachment { /** * Output only. Type of a PSC connection targeting this service attachment. */ readonly connectionType?: | "CONNECTION_TYPE_UNSPECIFIED" | "CONNECTION_TYPE_DISCOVERY" | "CONNECTION_TYPE_PRIMARY" | "CONNECTION_TYPE_READER"; /** * Output only. Service attachment URI which your self-created PscConnection * should use as the target. */ readonly serviceAttachment?: string; } /** * Configuration of the RDB based persistence. */ export interface RDBConfig { /** * Optional. Period between RDB snapshots. */ rdbSnapshotPeriod?: | "SNAPSHOT_PERIOD_UNSPECIFIED" | "ONE_HOUR" | "SIX_HOURS" | "TWELVE_HOURS" | "TWENTY_FOUR_HOURS"; /** * Optional. The time that the first snapshot was/will be attempted, and to * which future snapshots will be aligned. If not provided, the current time * will be used. */ rdbSnapshotStartTime?: Date; } function serializeRDBConfig(data: any): RDBConfig { return { ...data, rdbSnapshotStartTime: data["rdbSnapshotStartTime"] !== undefined ? data["rdbSnapshotStartTime"].toISOString() : undefined, }; } function deserializeRDBConfig(data: any): RDBConfig { return { ...data, rdbSnapshotStartTime: data["rdbSnapshotStartTime"] !== undefined ? new Date(data["rdbSnapshotStartTime"]) : undefined, }; } /** * Operation metadata returned by the CLH during resource state reconciliation. */ export interface ReconciliationOperationMetadata { /** * DEPRECATED. Use exclusive_action instead. */ deleteResource?: boolean; /** * Exclusive action returned by the CLH. */ exclusiveAction?: | "UNKNOWN_REPAIR_ACTION" | "DELETE" | "RETRY"; } /** * Details of the remote cluster associated with this cluster in a cross * cluster replication setup. */ export interface RemoteCluster { /** * The full resource path of the remote cluster in the format: * projects//locations//clusters/ */ cluster?: string; /** * Output only. The unique identifier of the remote cluster. */ readonly uid?: string; } /** * Request for rescheduling a cluster maintenance. */ export interface RescheduleClusterMaintenanceRequest { /** * Required. If reschedule type is SPECIFIC_TIME, must set up schedule_time * as well. */ rescheduleType?: | "RESCHEDULE_TYPE_UNSPECIFIED" | "IMMEDIATE" | "SPECIFIC_TIME"; /** * Optional. Timestamp when the maintenance shall be rescheduled to if * reschedule_type=SPECIFIC_TIME, in RFC 3339 format, for example * `2012-11-15T16:19:00.094Z`. */ scheduleTime?: Date; } function serializeRescheduleClusterMaintenanceRequest(data: any): RescheduleClusterMaintenanceRequest { return { ...data, scheduleTime: data["scheduleTime"] !== undefined ? data["scheduleTime"].toISOString() : undefined, }; } function deserializeRescheduleClusterMaintenanceRequest(data: any): RescheduleClusterMaintenanceRequest { return { ...data, scheduleTime: data["scheduleTime"] !== undefined ? new Date(data["scheduleTime"]) : undefined, }; } /** * Request for RescheduleMaintenance. */ export interface RescheduleMaintenanceRequest { /** * Required. If reschedule type is SPECIFIC_TIME, must set up schedule_time * as well. */ rescheduleType?: | "RESCHEDULE_TYPE_UNSPECIFIED" | "IMMEDIATE" | "NEXT_AVAILABLE_WINDOW" | "SPECIFIC_TIME"; /** * Optional. 
Timestamp when the maintenance shall be rescheduled to if * reschedule_type=SPECIFIC_TIME, in RFC 3339 format, for example * `2012-11-15T16:19:00.094Z`. */ scheduleTime?: Date; } function serializeRescheduleMaintenanceRequest(data: any): RescheduleMaintenanceRequest { return { ...data, scheduleTime: data["scheduleTime"] !== undefined ? data["scheduleTime"].toISOString() : undefined, }; } function deserializeRescheduleMaintenanceRequest(data: any): RescheduleMaintenanceRequest { return { ...data, scheduleTime: data["scheduleTime"] !== undefined ? new Date(data["scheduleTime"]) : undefined, }; } export interface RetentionSettings { /** * Duration based retention period i.e. 172800 seconds (2 days) */ durationBasedRetention?: number /* Duration */; quantityBasedRetention?: number; /** * The unit that 'retained_backups' represents. */ retentionUnit?: | "RETENTION_UNIT_UNSPECIFIED" | "COUNT" | "TIME" | "DURATION" | "RETENTION_UNIT_OTHER"; timeBasedRetention?: number /* Duration */; /** * Timestamp based retention period i.e. 2024-05-01T00:00:00Z */ timestampBasedRetentionTime?: Date; } function serializeRetentionSettings(data: any): RetentionSettings { return { ...data, durationBasedRetention: data["durationBasedRetention"] !== undefined ? data["durationBasedRetention"] : undefined, timeBasedRetention: data["timeBasedRetention"] !== undefined ? data["timeBasedRetention"] : undefined, timestampBasedRetentionTime: data["timestampBasedRetentionTime"] !== undefined ? data["timestampBasedRetentionTime"].toISOString() : undefined, }; } function deserializeRetentionSettings(data: any): RetentionSettings { return { ...data, durationBasedRetention: data["durationBasedRetention"] !== undefined ? data["durationBasedRetention"] : undefined, timeBasedRetention: data["timeBasedRetention"] !== undefined ? data["timeBasedRetention"] : undefined, timestampBasedRetentionTime: data["timestampBasedRetentionTime"] !== undefined ? new Date(data["timestampBasedRetentionTime"]) : undefined, }; } /** * Represents additional information about the state of the cluster. */ export interface StateInfo { /** * Describes ongoing update on the cluster when cluster state is UPDATING. */ updateInfo?: UpdateInfo; } /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains three * pieces of data: error code, error message, and error details. You can find * out more about this error model and how to work with it in the [API Design * Guide](https://cloud.google.com/apis/design/errors). */ export interface Status { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. */ details?: { [key: string]: any }[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } /** * Message type for storing tags. Tags provide a way to create annotations for * resources, and in some cases conditionally allow or deny policies based on * whether a resource has a specific tag. */ export interface Tags { /** * The Tag key/value mappings. */ tags?: { [key: string]: string }; } /** * Represents a time of day. 
The date and time zone are either not significant * or are specified elsewhere. An API may choose to allow leap seconds. Related * types are google.type.Date and `google.protobuf.Timestamp`. */ export interface TimeOfDay { /** * Hours of a day in 24 hour format. Must be greater than or equal to 0 and * typically must be less than or equal to 23. An API may choose to allow the * value "24:00:00" for scenarios like business closing time. */ hours?: number; /** * Minutes of an hour. Must be greater than or equal to 0 and less than or * equal to 59. */ minutes?: number; /** * Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 * and less than or equal to 999,999,999. */ nanos?: number; /** * Seconds of a minute. Must be greater than or equal to 0 and typically must * be less than or equal to 59. An API may allow the value 60 if it allows * leap-seconds. */ seconds?: number; } /** * TlsCertificate Resource */ export interface TlsCertificate { /** * PEM representation. */ cert?: string; /** * Output only. The time when the certificate was created in [RFC * 3339](https://tools.ietf.org/html/rfc3339) format, for example * `2020-05-18T00:00:00.094Z`. */ readonly createTime?: Date; /** * Output only. The time when the certificate expires in [RFC * 3339](https://tools.ietf.org/html/rfc3339) format, for example * `2020-05-18T00:00:00.094Z`. */ readonly expireTime?: Date; /** * Serial number, as extracted from the certificate. */ serialNumber?: string; /** * Sha1 Fingerprint of the certificate. */ sha1Fingerprint?: string; } /** * TypedValue represents the value of a metric type. It can either be a double, * an int64, a string or a bool. */ export interface TypedValue { /** * For boolean value */ boolValue?: boolean; /** * For double value */ doubleValue?: number; /** * For integer value */ int64Value?: bigint; /** * For string value */ stringValue?: string; } function serializeTypedValue(data: any): TypedValue { return { ...data, int64Value: data["int64Value"] !== undefined ? String(data["int64Value"]) : undefined, }; } function deserializeTypedValue(data: any): TypedValue { return { ...data, int64Value: data["int64Value"] !== undefined ? BigInt(data["int64Value"]) : undefined, }; } /** * Represents information about an updating cluster. */ export interface UpdateInfo { /** * Target number of replica nodes per shard. */ targetReplicaCount?: number; /** * Target number of shards for redis cluster */ targetShardCount?: number; } /** * Request for UpgradeInstance. */ export interface UpgradeInstanceRequest { /** * Required. Specifies the target version of Redis software to upgrade to. */ redisVersion?: string; } /** * Message type for storing user labels. User labels are used to tag App Engine * resources, allowing users to search for resources matching a set of labels * and to aggregate usage data by labels. */ export interface UserLabels { labels?: { [key: string]: string }; } /** * Time window in which disruptive maintenance updates occur. Non-disruptive * updates can occur inside or outside this window. */ export interface WeeklyMaintenanceWindow { /** * Required. The day of week that maintenance updates occur. */ day?: | "DAY_OF_WEEK_UNSPECIFIED" | "MONDAY" | "TUESDAY" | "WEDNESDAY" | "THURSDAY" | "FRIDAY" | "SATURDAY" | "SUNDAY"; /** * Output only. Duration of the maintenance window. The current window is * fixed at 1 hour. */ readonly duration?: number /* Duration */; /** * Required. Start time of the window in UTC time. 
*/ startTime?: TimeOfDay; } /** * Zone distribution config for allocation of cluster resources. */ export interface ZoneDistributionConfig { /** * Optional. The mode of zone distribution. Defaults to MULTI_ZONE, when not * specified. */ mode?: | "ZONE_DISTRIBUTION_MODE_UNSPECIFIED" | "MULTI_ZONE" | "SINGLE_ZONE"; /** * Optional. When SINGLE ZONE distribution is selected, zone field would be * used to allocate all resources in that zone. This is not applicable to * MULTI_ZONE, and would be ignored for MULTI_ZONE clusters. */ zone?: string; }
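/*
 * Illustrative sketch only (not part of the generated API surface): how a
 * caller might construct a few of the config types defined above. The concrete
 * values are hypothetical. Date-typed fields such as `rdbSnapshotStartTime`
 * and `scheduleTime` are converted to RFC 3339 strings by this module's
 * internal serialize* helpers before requests are sent, and converted back to
 * Date objects by the matching deserialize* helpers on responses.
 *
 *   const persistence: PersistenceConfig = {
 *     persistenceMode: "RDB",
 *     rdbSnapshotPeriod: "SIX_HOURS",
 *     rdbSnapshotStartTime: new Date("2033-01-01T06:45:00Z"),
 *   };
 *
 *   const window: WeeklyMaintenanceWindow = {
 *     day: "TUESDAY",
 *     startTime: { hours: 6, minutes: 45 },
 *   };
 *
 *   const reschedule: RescheduleClusterMaintenanceRequest = {
 *     rescheduleType: "SPECIFIC_TIME",
 *     scheduleTime: new Date("2033-01-02T06:45:00.000Z"),
 *   };
 */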