// Copyright 2022 Luca Casonato. All rights reserved. MIT license. /** * Datastream API Client for Deno * ============================== * * * * Docs: https://cloud.google.com/datastream/ * Source: https://googleapis.deno.dev/v1/datastream:v1.ts */ import { auth, CredentialsClient, GoogleAuth, request } from "/_/base@v1/mod.ts"; export { auth, GoogleAuth }; export type { CredentialsClient }; export class Datastream { #client: CredentialsClient | undefined; #baseUrl: string; constructor(client?: CredentialsClient, baseUrl: string = "https://datastream.googleapis.com/") { this.#client = client; this.#baseUrl = baseUrl; } /** * Use this method to create a connection profile in a project and location. * * @param parent Required. The parent that owns the collection of ConnectionProfiles. */ async projectsLocationsConnectionProfilesCreate(parent: string, req: ConnectionProfile, opts: ProjectsLocationsConnectionProfilesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/connectionProfiles`); if (opts.connectionProfileId !== undefined) { url.searchParams.append("connectionProfileId", String(opts.connectionProfileId)); } if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.validateOnly !== undefined) { url.searchParams.append("validateOnly", String(opts.validateOnly)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Use this method to delete a connection profile. * * @param name Required. The name of the connection profile resource to delete. 
*/ async projectsLocationsConnectionProfilesDelete(name: string, opts: ProjectsLocationsConnectionProfilesDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Use this method to discover a connection profile. The discover API call * exposes the data objects and metadata belonging to the profile. Typically, * a request returns children data objects of a parent data object that's * optionally supplied in the request. * * @param parent Required. The parent resource of the connection profile type. Must be in the format `projects/*/locations/*`. */ async projectsLocationsConnectionProfilesDiscover(parent: string, req: DiscoverConnectionProfileRequest): Promise { req = serializeDiscoverConnectionProfileRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/connectionProfiles:discover`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeDiscoverConnectionProfileResponse(data); } /** * Use this method to get details about a connection profile. * * @param name Required. The name of the connection profile resource to get. */ async projectsLocationsConnectionProfilesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ConnectionProfile; } /** * Use this method to list connection profiles created in a project and * location. * * @param parent Required. The parent that owns the collection of connection profiles. 
*/ async projectsLocationsConnectionProfilesList(parent: string, opts: ProjectsLocationsConnectionProfilesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/connectionProfiles`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListConnectionProfilesResponse; } /** * Use this method to update the parameters of a connection profile. * * @param name Output only. Identifier. The resource's name. */ async projectsLocationsConnectionProfilesPatch(name: string, req: ConnectionProfile, opts: ProjectsLocationsConnectionProfilesPatchOptions = {}): Promise { opts = serializeProjectsLocationsConnectionProfilesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } if (opts.validateOnly !== undefined) { url.searchParams.append("validateOnly", String(opts.validateOnly)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as Operation; } /** * The FetchStaticIps API call exposes the static IP addresses used by * Datastream. * * @param name Required. The resource name for the location for which static IPs should be returned. Must be in the format `projects/*/locations/*`. 
*/ async projectsLocationsFetchStaticIps(name: string, opts: ProjectsLocationsFetchStaticIpsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:fetchStaticIps`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as FetchStaticIpsResponse; } /** * Gets information about a location. * * @param name Resource name for the location. */ async projectsLocationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as Location; } /** * Lists information about the supported locations for this service. This * method can be called in two ways: * **List all public locations:** Use the * path `GET /v1/locations`. * **List project-visible locations:** Use the * path `GET /v1/projects/{project_id}/locations`. This may include public * locations as well as private or other locations specifically visible to the * project. * * @param name The resource that owns the locations collection, if applicable. 
*/ async projectsLocationsList(name: string, opts: ProjectsLocationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/locations`); if (opts.extraLocationTypes !== undefined) { url.searchParams.append("extraLocationTypes", String(opts.extraLocationTypes)); } if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListLocationsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsOperationsCancel(name: string, req: CancelOperationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Empty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. 
*/ async projectsLocationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Empty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as Operation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsOperationsList(name: string, opts: ProjectsLocationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.returnPartialSuccess !== undefined) { url.searchParams.append("returnPartialSuccess", String(opts.returnPartialSuccess)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListOperationsResponse; } /** * Use this method to create a private connectivity configuration. * * @param parent Required. The parent that owns the collection of PrivateConnections. 
*/ async projectsLocationsPrivateConnectionsCreate(parent: string, req: PrivateConnection, opts: ProjectsLocationsPrivateConnectionsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/privateConnections`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } if (opts.privateConnectionId !== undefined) { url.searchParams.append("privateConnectionId", String(opts.privateConnectionId)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.validateOnly !== undefined) { url.searchParams.append("validateOnly", String(opts.validateOnly)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Use this method to delete a private connectivity configuration. * * @param name Required. The name of the private connectivity configuration to delete. */ async projectsLocationsPrivateConnectionsDelete(name: string, opts: ProjectsLocationsPrivateConnectionsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Use this method to get details about a private connectivity configuration. * * @param name Required. The name of the private connectivity configuration to get. */ async projectsLocationsPrivateConnectionsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as PrivateConnection; } /** * Use this method to list private connectivity configurations in a project * and location. * * @param parent Required. 
The parent that owns the collection of private connectivity configurations. */ async projectsLocationsPrivateConnectionsList(parent: string, opts: ProjectsLocationsPrivateConnectionsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/privateConnections`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListPrivateConnectionsResponse; } /** * Use this method to create a route for a private connectivity configuration * in a project and location. * * @param parent Required. The parent that owns the collection of Routes. */ async projectsLocationsPrivateConnectionsRoutesCreate(parent: string, req: Route, opts: ProjectsLocationsPrivateConnectionsRoutesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/routes`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.routeId !== undefined) { url.searchParams.append("routeId", String(opts.routeId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Use this method to delete a route. * * @param name Required. The name of the Route resource to delete. 
*/ async projectsLocationsPrivateConnectionsRoutesDelete(name: string, opts: ProjectsLocationsPrivateConnectionsRoutesDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Use this method to get details about a route. * * @param name Required. The name of the Route resource to get. */ async projectsLocationsPrivateConnectionsRoutesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as Route; } /** * Use this method to list routes created for a private connectivity * configuration in a project and location. * * @param parent Required. The parent that owns the collection of Routess. */ async projectsLocationsPrivateConnectionsRoutesList(parent: string, opts: ProjectsLocationsPrivateConnectionsRoutesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/routes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListRoutesResponse; } /** * Use this method to create a stream. * * @param parent Required. The parent that owns the collection of streams. 
*/ async projectsLocationsStreamsCreate(parent: string, req: Stream, opts: ProjectsLocationsStreamsCreateOptions = {}): Promise { req = serializeStream(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/streams`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.streamId !== undefined) { url.searchParams.append("streamId", String(opts.streamId)); } if (opts.validateOnly !== undefined) { url.searchParams.append("validateOnly", String(opts.validateOnly)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } /** * Use this method to delete a stream. * * @param name Required. The name of the stream resource to delete. */ async projectsLocationsStreamsDelete(name: string, opts: ProjectsLocationsStreamsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as Operation; } /** * Use this method to get details about a stream. * * @param name Required. The name of the stream resource to get. */ async projectsLocationsStreamsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeStream(data); } /** * Use this method to list streams in a project and location. * * @param parent Required. The parent that owns the collection of streams. 
*/ async projectsLocationsStreamsList(parent: string, opts: ProjectsLocationsStreamsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/streams`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeListStreamsResponse(data); } /** * Use this method to get details about a stream object. * * @param name Required. The name of the stream object resource to get. */ async projectsLocationsStreamsObjectsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as StreamObject; } /** * Use this method to list the objects of a specific stream. * * @param parent Required. The parent stream that owns the collection of objects. */ async projectsLocationsStreamsObjectsList(parent: string, opts: ProjectsLocationsStreamsObjectsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/objects`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as ListStreamObjectsResponse; } /** * Use this method to look up a stream object by its source object * identifier. * * @param parent Required. The parent stream that owns the collection of objects. 
*/ async projectsLocationsStreamsObjectsLookup(parent: string, req: LookupStreamObjectRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/objects:lookup`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as StreamObject; } /** * Use this method to start a backfill job for the specified stream object. * * @param object Required. The name of the stream object resource to start a backfill job for. */ async projectsLocationsStreamsObjectsStartBackfillJob(object: string, req: StartBackfillJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ object }:startBackfillJob`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as StartBackfillJobResponse; } /** * Use this method to stop a backfill job for the specified stream object. * * @param object Required. The name of the stream object resource to stop the backfill job for. */ async projectsLocationsStreamsObjectsStopBackfillJob(object: string, req: StopBackfillJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ object }:stopBackfillJob`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as StopBackfillJobResponse; } /** * Use this method to update the configuration of a stream. * * @param name Output only. Identifier. The stream's name. 
*/ async projectsLocationsStreamsPatch(name: string, req: Stream, opts: ProjectsLocationsStreamsPatchOptions = {}): Promise { req = serializeStream(req); opts = serializeProjectsLocationsStreamsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } if (opts.requestId !== undefined) { url.searchParams.append("requestId", String(opts.requestId)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } if (opts.validateOnly !== undefined) { url.searchParams.append("validateOnly", String(opts.validateOnly)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as Operation; } /** * Use this method to start, resume or recover a stream with a non default * CDC strategy. * * @param name Required. Name of the stream resource to start, in the format: projects/{project_id}/locations/{location}/streams/{stream_name} */ async projectsLocationsStreamsRun(name: string, req: RunStreamRequest): Promise { req = serializeRunStreamRequest(req); const url = new URL(`${this.#baseUrl}v1/${ name }:run`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as Operation; } } /** * AppendOnly mode defines that all changes to a table will be written to the * destination table. */ export interface AppendOnly { } /** * AVRO file format configuration. */ export interface AvroFileFormat { } /** * Backfill strategy to automatically backfill the Stream's objects. Specific * objects can be excluded. */ export interface BackfillAllStrategy { /** * MongoDB data source objects to avoid backfilling */ mongodbExcludedObjects?: MongodbCluster; /** * MySQL data source objects to avoid backfilling. */ mysqlExcludedObjects?: MysqlRdbms; /** * Oracle data source objects to avoid backfilling. 
*/ oracleExcludedObjects?: OracleRdbms; /** * PostgreSQL data source objects to avoid backfilling. */ postgresqlExcludedObjects?: PostgresqlRdbms; /** * Salesforce data source objects to avoid backfilling */ salesforceExcludedObjects?: SalesforceOrg; /** * Spanner data source objects to avoid backfilling. */ spannerExcludedObjects?: SpannerDatabase; /** * SQLServer data source objects to avoid backfilling */ sqlServerExcludedObjects?: SqlServerRdbms; } function serializeBackfillAllStrategy(data: any): BackfillAllStrategy { return { ...data, spannerExcludedObjects: data["spannerExcludedObjects"] !== undefined ? serializeSpannerDatabase(data["spannerExcludedObjects"]) : undefined, }; } function deserializeBackfillAllStrategy(data: any): BackfillAllStrategy { return { ...data, spannerExcludedObjects: data["spannerExcludedObjects"] !== undefined ? deserializeSpannerDatabase(data["spannerExcludedObjects"]) : undefined, }; } /** * Represents a backfill job on a specific stream object. */ export interface BackfillJob { /** * Output only. Errors which caused the backfill job to fail. */ readonly errors?: Error[]; /** * Output only. Backfill job's end time. */ readonly lastEndTime?: Date; /** * Output only. Backfill job's start time. */ readonly lastStartTime?: Date; /** * Output only. Backfill job state. */ readonly state?: | "STATE_UNSPECIFIED" | "NOT_STARTED" | "PENDING" | "ACTIVE" | "STOPPED" | "FAILED" | "COMPLETED" | "UNSUPPORTED"; /** * Backfill job's triggering reason. */ trigger?: | "TRIGGER_UNSPECIFIED" | "AUTOMATIC" | "MANUAL"; } /** * Backfill strategy to disable automatic backfill for the Stream's objects. */ export interface BackfillNoneStrategy { } /** * Describes violations in a client request. This error type focuses on the * syntactic aspects of the request. */ export interface BadRequest { /** * Describes all violations in a client request. 
*/ fieldViolations?: FieldViolation[]; } /** * Message to represent the option where Datastream will enforce encryption * without authenticating server identity. Server certificates will be trusted * by default. */ export interface BasicEncryption { } /** * BigQuery clustering configuration. */ export interface BigQueryClustering { /** * Required. Column names to set as clustering columns. */ columns?: string[]; } /** * BigQuery destination configuration */ export interface BigQueryDestinationConfig { /** * Append only mode */ appendOnly?: AppendOnly; /** * Optional. Big Lake Managed Tables (BLMT) configuration. */ blmtConfig?: BlmtConfig; /** * The guaranteed data freshness (in seconds) when querying tables created by * the stream. Editing this field will only affect new tables created in the * future, but existing tables will not be impacted. Lower values mean that * queries will return fresher data, but may result in higher cost. */ dataFreshness?: number /* Duration */; /** * The standard mode */ merge?: Merge; /** * Single destination dataset. */ singleTargetDataset?: SingleTargetDataset; /** * Source hierarchy datasets. */ sourceHierarchyDatasets?: SourceHierarchyDatasets; } function serializeBigQueryDestinationConfig(data: any): BigQueryDestinationConfig { return { ...data, dataFreshness: data["dataFreshness"] !== undefined ? data["dataFreshness"] : undefined, }; } function deserializeBigQueryDestinationConfig(data: any): BigQueryDestinationConfig { return { ...data, dataFreshness: data["dataFreshness"] !== undefined ? data["dataFreshness"] : undefined, }; } /** * BigQuery partitioning configuration. */ export interface BigQueryPartitioning { /** * Ingestion time partitioning. */ ingestionTimePartition?: IngestionTimePartition; /** * Integer range partitioning. */ integerRangePartition?: IntegerRangePartition; /** * Optional. If true, queries over the table require a partition filter. */ requirePartitionFilter?: boolean; /** * Time unit column partitioning. 
*/ timeUnitPartition?: TimeUnitPartition; } function serializeBigQueryPartitioning(data: any): BigQueryPartitioning { return { ...data, integerRangePartition: data["integerRangePartition"] !== undefined ? serializeIntegerRangePartition(data["integerRangePartition"]) : undefined, }; } function deserializeBigQueryPartitioning(data: any): BigQueryPartitioning { return { ...data, integerRangePartition: data["integerRangePartition"] !== undefined ? deserializeIntegerRangePartition(data["integerRangePartition"]) : undefined, }; } /** * Profile for connecting to a BigQuery destination. */ export interface BigQueryProfile { } /** * Configuration to use Binary Log Parser CDC technique. */ export interface BinaryLogParser { /** * Use Oracle directories. */ logFileDirectories?: LogFileDirectories; /** * Use Oracle ASM. */ oracleAsmLogFileAccess?: OracleAsmLogFileAccess; } /** * Use Binary log position based replication. */ export interface BinaryLogPosition { } /** * The configuration for BLMT. */ export interface BlmtConfig { /** * Required. The Cloud Storage bucket name. */ bucket?: string; /** * Required. The bigquery connection. Format: `{project}.{location}.{name}` */ connectionName?: string; /** * Required. The file format. */ fileFormat?: | "FILE_FORMAT_UNSPECIFIED" | "PARQUET"; /** * The root path inside the Cloud Storage bucket. */ rootPath?: string; /** * Required. The table format. */ tableFormat?: | "TABLE_FORMAT_UNSPECIFIED" | "ICEBERG"; } /** * The request message for Operations.CancelOperation. */ export interface CancelOperationRequest { } /** * The strategy that the stream uses for CDC replication. */ export interface CdcStrategy { /** * Optional. Start replicating from the most recent position in the source. */ mostRecentStartPosition?: MostRecentStartPosition; /** * Optional. Resume replication from the next available position in the * source. */ nextAvailableStartPosition?: NextAvailableStartPosition; /** * Optional. 
Start replicating from a specific position in the source. */ specificStartPosition?: SpecificStartPosition; } function serializeCdcStrategy(data: any): CdcStrategy { return { ...data, specificStartPosition: data["specificStartPosition"] !== undefined ? serializeSpecificStartPosition(data["specificStartPosition"]) : undefined, }; } function deserializeCdcStrategy(data: any): CdcStrategy { return { ...data, specificStartPosition: data["specificStartPosition"] !== undefined ? deserializeSpecificStartPosition(data["specificStartPosition"]) : undefined, }; } /** * A set of reusable connection configurations to be used as a source or * destination for a stream. */ export interface ConnectionProfile { /** * Profile for connecting to a BigQuery destination. */ bigqueryProfile?: BigQueryProfile; /** * Output only. The create time of the resource. */ readonly createTime?: Date; /** * Required. Display name. */ displayName?: string; /** * Forward SSH tunnel connectivity. */ forwardSshConnectivity?: ForwardSshTunnelConnectivity; /** * Profile for connecting to a Cloud Storage destination. */ gcsProfile?: GcsProfile; /** * Labels. */ labels?: { [key: string]: string }; /** * Profile for connecting to a MongoDB source. */ mongodbProfile?: MongodbProfile; /** * Profile for connecting to a MySQL source. */ mysqlProfile?: MysqlProfile; /** * Output only. Identifier. The resource's name. */ readonly name?: string; /** * Profile for connecting to an Oracle source. */ oracleProfile?: OracleProfile; /** * Profile for connecting to a PostgreSQL source. */ postgresqlProfile?: PostgresqlProfile; /** * Private connectivity. */ privateConnectivity?: PrivateConnectivity; /** * Profile for connecting to a Salesforce source. */ salesforceProfile?: SalesforceProfile; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Profile for connecting to a Spanner source. 
*/ spannerProfile?: SpannerProfile; /** * Profile for connecting to a SQLServer source. */ sqlServerProfile?: SqlServerProfile; /** * Static Service IP connectivity. */ staticServiceIpConnectivity?: StaticServiceIpConnectivity; /** * Output only. The update time of the resource. */ readonly updateTime?: Date; } /** * A customization rule to apply to a set of objects. */ export interface CustomizationRule { /** * BigQuery clustering rule. */ bigqueryClustering?: BigQueryClustering; /** * BigQuery partitioning rule. */ bigqueryPartitioning?: BigQueryPartitioning; } function serializeCustomizationRule(data: any): CustomizationRule { return { ...data, bigqueryPartitioning: data["bigqueryPartitioning"] !== undefined ? serializeBigQueryPartitioning(data["bigqueryPartitioning"]) : undefined, }; } function deserializeCustomizationRule(data: any): CustomizationRule { return { ...data, bigqueryPartitioning: data["bigqueryPartitioning"] !== undefined ? deserializeBigQueryPartitioning(data["bigqueryPartitioning"]) : undefined, }; } /** * Dataset template used for dynamic dataset creation. */ export interface DatasetTemplate { /** * If supplied, every created dataset will have its name prefixed by the * provided value. The prefix and name will be separated by an underscore. * i.e. _. */ datasetIdPrefix?: string; /** * Describes the Cloud KMS encryption key that will be used to protect * destination BigQuery table. The BigQuery Service Account associated with * your project requires access to this encryption key. i.e. * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{cryptoKey}. * See https://cloud.google.com/bigquery/docs/customer-managed-encryption for * more information. */ kmsKeyName?: string; /** * Required. The geographic location where the dataset should reside. See * https://cloud.google.com/bigquery/docs/locations for supported locations. */ location?: string; } /** * Describes additional debugging info. 
*/ export interface DebugInfo { /** * Additional debugging information provided by the server. */ detail?: string; /** * The stack trace entries indicating where the error occurred. */ stackEntries?: string[]; } /** * The configuration of the stream destination. */ export interface DestinationConfig { /** * BigQuery destination configuration. */ bigqueryDestinationConfig?: BigQueryDestinationConfig; /** * Required. Destination connection profile resource. Format: * `projects/{project}/locations/{location}/connectionProfiles/{name}` */ destinationConnectionProfile?: string; /** * A configuration for how data should be loaded to Cloud Storage. */ gcsDestinationConfig?: GcsDestinationConfig; } function serializeDestinationConfig(data: any): DestinationConfig { return { ...data, bigqueryDestinationConfig: data["bigqueryDestinationConfig"] !== undefined ? serializeBigQueryDestinationConfig(data["bigqueryDestinationConfig"]) : undefined, gcsDestinationConfig: data["gcsDestinationConfig"] !== undefined ? serializeGcsDestinationConfig(data["gcsDestinationConfig"]) : undefined, }; } function deserializeDestinationConfig(data: any): DestinationConfig { return { ...data, bigqueryDestinationConfig: data["bigqueryDestinationConfig"] !== undefined ? deserializeBigQueryDestinationConfig(data["bigqueryDestinationConfig"]) : undefined, gcsDestinationConfig: data["gcsDestinationConfig"] !== undefined ? deserializeGcsDestinationConfig(data["gcsDestinationConfig"]) : undefined, }; } /** * Request message for 'discover' ConnectionProfile request. */ export interface DiscoverConnectionProfileRequest { /** * Optional. An ad-hoc connection profile configuration. */ connectionProfile?: ConnectionProfile; /** * Optional. A reference to an existing connection profile. */ connectionProfileName?: string; /** * Optional. Whether to retrieve the full hierarchy of data objects (TRUE) or * only the current level (FALSE). */ fullHierarchy?: boolean; /** * Optional. 
The number of hierarchy levels below the current level to be * retrieved. */ hierarchyDepth?: number; /** * Optional. MongoDB cluster to enrich with child data objects and metadata. */ mongodbCluster?: MongodbCluster; /** * Optional. MySQL RDBMS to enrich with child data objects and metadata. */ mysqlRdbms?: MysqlRdbms; /** * Optional. Oracle RDBMS to enrich with child data objects and metadata. */ oracleRdbms?: OracleRdbms; /** * Optional. PostgreSQL RDBMS to enrich with child data objects and metadata. */ postgresqlRdbms?: PostgresqlRdbms; /** * Optional. Salesforce organization to enrich with child data objects and * metadata. */ salesforceOrg?: SalesforceOrg; /** * Optional. Spanner database to enrich with child data objects and metadata. */ spannerDatabase?: SpannerDatabase; /** * Optional. SQLServer RDBMS to enrich with child data objects and metadata. */ sqlServerRdbms?: SqlServerRdbms; } function serializeDiscoverConnectionProfileRequest(data: any): DiscoverConnectionProfileRequest { return { ...data, spannerDatabase: data["spannerDatabase"] !== undefined ? serializeSpannerDatabase(data["spannerDatabase"]) : undefined, }; } function deserializeDiscoverConnectionProfileRequest(data: any): DiscoverConnectionProfileRequest { return { ...data, spannerDatabase: data["spannerDatabase"] !== undefined ? deserializeSpannerDatabase(data["spannerDatabase"]) : undefined, }; } /** * Response from a discover request. */ export interface DiscoverConnectionProfileResponse { /** * Enriched MongoDB cluster. */ mongodbCluster?: MongodbCluster; /** * Enriched MySQL RDBMS object. */ mysqlRdbms?: MysqlRdbms; /** * Enriched Oracle RDBMS object. */ oracleRdbms?: OracleRdbms; /** * Enriched PostgreSQL RDBMS object. */ postgresqlRdbms?: PostgresqlRdbms; /** * Enriched Salesforce organization. */ salesforceOrg?: SalesforceOrg; /** * Enriched Spanner database. */ spannerDatabase?: SpannerDatabase; /** * Enriched SQLServer RDBMS object. 
*/ sqlServerRdbms?: SqlServerRdbms; } function serializeDiscoverConnectionProfileResponse(data: any): DiscoverConnectionProfileResponse { return { ...data, spannerDatabase: data["spannerDatabase"] !== undefined ? serializeSpannerDatabase(data["spannerDatabase"]) : undefined, }; } function deserializeDiscoverConnectionProfileResponse(data: any): DiscoverConnectionProfileResponse { return { ...data, spannerDatabase: data["spannerDatabase"] !== undefined ? deserializeSpannerDatabase(data["spannerDatabase"]) : undefined, }; } /** * Configuration to drop large object values. */ export interface DropLargeObjects { } /** * A generic empty message that you can re-use to avoid defining duplicated * empty messages in your APIs. A typical example is to use it as the request or * the response type of an API method. For instance: service Foo { rpc * Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } */ export interface Empty { } /** * Message to represent the option where Datastream will enforce encryption and * authenticate server identity. ca_certificate must be set if user selects this * option. */ export interface EncryptionAndServerValidation { /** * Optional. Input only. PEM-encoded certificate of the CA that signed the * source database server's certificate. */ caCertificate?: string; /** * Optional. The hostname mentioned in the Subject or SAN extension of the * server certificate. This field is used for bypassing the hostname * validation while verifying server certificate. This is required for * scenarios where the host name that datastream connects to is different from * the certificate's subject. This specifically happens for private * connectivity. It could also happen when the customer provides a public IP * in connection profile but the same is not present in the server * certificate. */ serverCertificateHostname?: string; } /** * Message to represent the option where encryption is not enforced. 
An empty * message right now to allow future extensibility. */ export interface EncryptionNotEnforced { } /** * Represent a user-facing Error. */ export interface Error { /** * Additional information about the error. */ details?: { [key: string]: string }; /** * The time when the error occurred. */ errorTime?: Date; /** * A unique identifier for this specific error, allowing it to be traced * throughout the system in logs and API responses. */ errorUuid?: string; /** * A message containing more information about the error that occurred. */ message?: string; /** * A title that explains the reason for the error. */ reason?: string; } function serializeError(data: any): Error { return { ...data, errorTime: data["errorTime"] !== undefined ? data["errorTime"].toISOString() : undefined, }; } function deserializeError(data: any): Error { return { ...data, errorTime: data["errorTime"] !== undefined ? new Date(data["errorTime"]) : undefined, }; } /** * Describes the cause of the error with structured details. Example of an * error when contacting the "pubsub.googleapis.com" API when it is not enabled: * { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { * "resource": "projects/123", "service": "pubsub.googleapis.com" } } This * response indicates that the pubsub.googleapis.com API is not enabled. Example * of an error that is returned when attempting to create a Spanner instance in * a region that is out of stock: { "reason": "STOCKOUT" "domain": * "spanner.googleapis.com", "metadata": { "availableRegions": * "us-central1,us-east2" } } */ export interface ErrorInfo { /** * The logical grouping to which the "reason" belongs. The error domain is * typically the registered service name of the tool or product that generates * the error. Example: "pubsub.googleapis.com". If the error is generated by * some common infrastructure, the error domain must be a globally unique * value that identifies the infrastructure. 
For Google API infrastructure, * the error domain is "googleapis.com". */ domain?: string; /** * Additional structured details about this error. Keys must match a regular * expression of `a-z+` but should ideally be lowerCamelCase. Also, they must * be limited to 64 characters in length. When identifying the current value * of an exceeded limit, the units should be contained in the key, not the * value. For example, rather than `{"instanceLimit": "100/request"}`, should * be returned as, `{"instanceLimitPerRequest": "100"}`, if the client exceeds * the number of instances that can be created in a single (batch) request. */ metadata?: { [key: string]: string }; /** * The reason of the error. This is a constant value that identifies the * proximate cause of the error. Error reasons are unique within a particular * domain of errors. This should be at most 63 characters and match a regular * expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. */ reason?: string; } /** * Represents a filter for included data on a stream object. */ export interface EventFilter { /** * An SQL-query Where clause selecting which data should be included, not * including the "WHERE" keyword. e.g., `t.key1 = 'value1' AND t.key2 = * 'value2'` */ sqlWhereClause?: string; } /** * Response message for a 'FetchStaticIps' response. */ export interface FetchStaticIpsResponse { /** * A token that can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * list of static ips by account */ staticIps?: string[]; } /** * A message type used to describe a single bad request field. */ export interface FieldViolation { /** * A description of why the request element is bad. */ description?: string; /** * A path that leads to a field in the request body. The value will be a * sequence of dot-separated identifiers that identify a protocol buffer * field. 
Consider the following: message CreateContactRequest { message * EmailAddress { enum Type { TYPE_UNSPECIFIED = 0; HOME = 1; WORK = 2; } * optional string email = 1; repeated EmailType type = 2; } string full_name * = 1; repeated EmailAddress email_addresses = 2; } In this example, in proto * `field` could take one of the following values: * `full_name` for a * violation in the `full_name` value * `email_addresses[1].email` for a * violation in the `email` field of the first `email_addresses` message * * `email_addresses[3].type[2]` for a violation in the second `type` value in * the third `email_addresses` message. In JSON, the same values are * represented as: * `fullName` for a violation in the `fullName` value * * `emailAddresses[1].email` for a violation in the `email` field of the first * `emailAddresses` message * `emailAddresses[3].type[2]` for a violation in * the second `type` value in the third `emailAddresses` message. */ field?: string; /** * Provides a localized error message for field-level errors that is safe to * return to the API consumer. */ localizedMessage?: LocalizedMessage; /** * The reason of the field-level error. This is a constant value that * identifies the proximate cause of the field-level error. It should uniquely * identify the type of the FieldViolation within the scope of the * google.rpc.ErrorInfo.domain. This should be at most 63 characters and match * a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. */ reason?: string; } /** * Forward SSH Tunnel connectivity. */ export interface ForwardSshTunnelConnectivity { /** * Required. Hostname for the SSH tunnel. */ hostname?: string; /** * Input only. SSH password. */ password?: string; /** * Port for the SSH tunnel, default value is 22. */ port?: number; /** * Input only. SSH private key. */ privateKey?: string; /** * Required. Username for the SSH tunnel. 
*/ username?: string; } /** * Google Cloud Storage destination configuration */ export interface GcsDestinationConfig { /** * AVRO file format configuration. */ avroFileFormat?: AvroFileFormat; /** * The maximum duration for which new events are added before a file is * closed and a new file is created. Values within the range of 15-60 seconds * are allowed. */ fileRotationInterval?: number /* Duration */; /** * The maximum file size to be saved in the bucket. */ fileRotationMb?: number; /** * JSON file format configuration. */ jsonFileFormat?: JsonFileFormat; /** * Path inside the Cloud Storage bucket to write data to. */ path?: string; } function serializeGcsDestinationConfig(data: any): GcsDestinationConfig { return { ...data, fileRotationInterval: data["fileRotationInterval"] !== undefined ? data["fileRotationInterval"] : undefined, }; } function deserializeGcsDestinationConfig(data: any): GcsDestinationConfig { return { ...data, fileRotationInterval: data["fileRotationInterval"] !== undefined ? data["fileRotationInterval"] : undefined, }; } /** * Profile for connecting to a Cloud Storage destination. */ export interface GcsProfile { /** * Required. The Cloud Storage bucket name. */ bucket?: string; /** * Optional. The root path inside the Cloud Storage bucket. */ rootPath?: string; } /** * Use GTID based replication. */ export interface Gtid { } /** * Provides links to documentation or for performing an out of band action. For * example, if a quota check failed with an error indicating the calling project * hasn't enabled the accessed service, this can contain a URL pointing directly * to the right place in the developer console to flip the bit. */ export interface Help { /** * URL(s) pointing to additional information on handling the current error. */ links?: Link[]; } /** * A HostAddress represents a transport end point, which is the combination of * an IP address or hostname and a port number. */ export interface HostAddress { /** * Required. 
Hostname for the connection. */ hostname?: string; /** * Optional. Port for the connection. */ port?: number; } /** * Ingestion time partitioning. see * https://cloud.google.com/bigquery/docs/partitioned-tables#ingestion_time */ export interface IngestionTimePartition { /** * Optional. Partition granularity */ partitioningTimeGranularity?: | "PARTITIONING_TIME_GRANULARITY_UNSPECIFIED" | "PARTITIONING_TIME_GRANULARITY_HOUR" | "PARTITIONING_TIME_GRANULARITY_DAY" | "PARTITIONING_TIME_GRANULARITY_MONTH" | "PARTITIONING_TIME_GRANULARITY_YEAR"; } /** * Integer range partitioning. see * https://cloud.google.com/bigquery/docs/partitioned-tables#integer_range */ export interface IntegerRangePartition { /** * Required. The partitioning column. */ column?: string; /** * Required. The ending value for range partitioning (exclusive). */ end?: bigint; /** * Required. The interval of each range within the partition. */ interval?: bigint; /** * Required. The starting value for range partitioning (inclusive). */ start?: bigint; } function serializeIntegerRangePartition(data: any): IntegerRangePartition { return { ...data, end: data["end"] !== undefined ? String(data["end"]) : undefined, interval: data["interval"] !== undefined ? String(data["interval"]) : undefined, start: data["start"] !== undefined ? String(data["start"]) : undefined, }; } function deserializeIntegerRangePartition(data: any): IntegerRangePartition { return { ...data, end: data["end"] !== undefined ? BigInt(data["end"]) : undefined, interval: data["interval"] !== undefined ? BigInt(data["interval"]) : undefined, start: data["start"] !== undefined ? BigInt(data["start"]) : undefined, }; } /** * JSON file format configuration. */ export interface JsonFileFormat { /** * Compression of the loaded JSON file. */ compression?: | "JSON_COMPRESSION_UNSPECIFIED" | "NO_COMPRESSION" | "GZIP"; /** * The schema file format along JSON data files. 
*/ schemaFileFormat?: | "SCHEMA_FILE_FORMAT_UNSPECIFIED" | "NO_SCHEMA_FILE" | "AVRO_SCHEMA_FILE"; } /** * Describes a URL link. */ export interface Link { /** * Describes what the link offers. */ description?: string; /** * The URL of the link. */ url?: string; } /** * Response message for listing connection profiles. */ export interface ListConnectionProfilesResponse { /** * List of connection profiles. */ connectionProfiles?: ConnectionProfile[]; /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * Locations that could not be reached. */ unreachable?: string[]; } /** * The response message for Locations.ListLocations. */ export interface ListLocationsResponse { /** * A list of locations that matches the specified filter in the request. */ locations?: Location[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * The response message for Operations.ListOperations. */ export interface ListOperationsResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of operations that matches the specified filter in the request. */ operations?: Operation[]; /** * Unordered list. Unreachable resources. Populated when the request sets * `ListOperationsRequest.return_partial_success` and reads across * collections. For example, when attempting to list all resources across all * supported locations. */ unreachable?: string[]; } /** * Response containing a list of private connection configurations. */ export interface ListPrivateConnectionsResponse { /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * List of private connectivity configurations. */ privateConnections?: PrivateConnection[]; /** * Locations that could not be reached. */ unreachable?: string[]; } /** * Route list response. 
*/ export interface ListRoutesResponse { /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * List of Routes. */ routes?: Route[]; /** * Locations that could not be reached. */ unreachable?: string[]; } /** * Response containing the objects for a stream. */ export interface ListStreamObjectsResponse { /** * A token, which can be sent as `page_token` to retrieve the next page. */ nextPageToken?: string; /** * List of stream objects. */ streamObjects?: StreamObject[]; } /** * Response message for listing streams. */ export interface ListStreamsResponse { /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * List of streams */ streams?: Stream[]; /** * Locations that could not be reached. */ unreachable?: string[]; } function serializeListStreamsResponse(data: any): ListStreamsResponse { return { ...data, streams: data["streams"] !== undefined ? data["streams"].map((item: any) => (serializeStream(item))) : undefined, }; } function deserializeListStreamsResponse(data: any): ListStreamsResponse { return { ...data, streams: data["streams"] !== undefined ? data["streams"].map((item: any) => (deserializeStream(item))) : undefined, }; } /** * Provides a localized error message that is safe to return to the user which * can be attached to an RPC error. */ export interface LocalizedMessage { /** * The locale used following the specification defined at * https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", * "fr-CH", "es-MX" */ locale?: string; /** * The localized error message in the above locale. */ message?: string; } /** * A resource that represents a Google Cloud location. */ export interface Location { /** * The friendly name for this location, typically a nearby city name. For * example, "Tokyo". 
*/ displayName?: string; /** * Cross-service attributes for the location. For example * {"cloud.googleapis.com/region": "us-east1"} */ labels?: { [key: string]: string }; /** * The canonical id for this location. For example: `"us-east1"`. */ locationId?: string; /** * Service-specific metadata. For example the available capacity at the given * location. */ metadata?: { [key: string]: any }; /** * Resource name for the location, which may vary between implementations. * For example: `"projects/example-project/locations/us-east1"` */ name?: string; } /** * Configuration to specify the Oracle directories to access the log files. */ export interface LogFileDirectories { /** * Required. Oracle directory for archived logs. */ archivedLogDirectory?: string; /** * Required. Oracle directory for online logs. */ onlineLogDirectory?: string; } /** * Configuration to use LogMiner CDC method. */ export interface LogMiner { } /** * Request for looking up a specific stream object by its source object * identifier. */ export interface LookupStreamObjectRequest { /** * Required. The source object identifier which maps to the stream object. */ sourceObjectIdentifier?: SourceObjectIdentifier; } /** * Merge mode defines that all changes to a table will be merged at the * destination table. */ export interface Merge { } /** * MongoDB change stream position */ export interface MongodbChangeStreamPosition { /** * Required. The timestamp to start change stream from. */ startTime?: Date; } function serializeMongodbChangeStreamPosition(data: any): MongodbChangeStreamPosition { return { ...data, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeMongodbChangeStreamPosition(data: any): MongodbChangeStreamPosition { return { ...data, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * MongoDB Cluster structure. 
*/ export interface MongodbCluster { /** * MongoDB databases in the cluster. */ databases?: MongodbDatabase[]; } /** * MongoDB Collection. */ export interface MongodbCollection { /** * The collection name. */ collection?: string; /** * Fields in the collection. */ fields?: MongodbField[]; } /** * MongoDB Database. */ export interface MongodbDatabase { /** * Collections in the database. */ collections?: MongodbCollection[]; /** * The database name. */ database?: string; } /** * MongoDB Field. */ export interface MongodbField { /** * The field name. */ field?: string; } /** * MongoDB data source object identifier. */ export interface MongodbObjectIdentifier { /** * Required. The collection name. */ collection?: string; /** * Required. The database name. */ database?: string; } /** * Profile for connecting to a MongoDB source. */ export interface MongodbProfile { /** * Optional. Specifies additional options for the MongoDB connection. The * options should be sent as key-value pairs, for example: `additional_options * = {"serverSelectionTimeoutMS": "10000", "directConnection": "true"}`. Keys * are case-sensitive and should match the official MongoDB connection string * options: * https://www.mongodb.com/docs/manual/reference/connection-string-options/ * The server will not modify the values provided by the user. */ additionalOptions?: { [key: string]: string }; /** * Required. List of host addresses for a MongoDB cluster. For SRV connection * format, this list must contain exactly one DNS host without a port. For * Standard connection format, this list must contain all the required hosts * in the cluster with their respective ports. */ hostAddresses?: HostAddress[]; /** * Optional. Password for the MongoDB connection. Mutually exclusive with the * `secret_manager_stored_password` field. */ password?: string; /** * Optional. Name of the replica set. Only needed for self hosted replica set * type MongoDB cluster. For SRV connection format, this field must be empty. 
 * For Standard connection format, this field must be specified.
 */
replicaSet?: string;
/**
 * Optional. A reference to a Secret Manager resource name storing the
 * MongoDB connection password. Mutually exclusive with the `password`
 * field. (Upstream doc said "SQLServer" here — copy-paste from
 * SqlServerProfile; this field belongs to the MongoDB profile.)
 */
secretManagerStoredPassword?: string;
/**
 * Srv connection format.
 */
srvConnectionFormat?: SrvConnectionFormat;
/**
 * Optional. SSL configuration for the MongoDB connection.
 */
sslConfig?: MongodbSslConfig;
/**
 * Standard connection format.
 */
standardConnectionFormat?: StandardConnectionFormat;
/**
 * Required. Username for the MongoDB connection.
 */
username?: string;
}

/**
 * Configuration for syncing data from a MongoDB source.
 */
export interface MongodbSourceConfig {
  /**
   * The MongoDB collections to exclude from the stream.
   */
  excludeObjects?: MongodbCluster;
  /**
   * The MongoDB collections to include in the stream.
   */
  includeObjects?: MongodbCluster;
  /**
   * Optional. MongoDB JSON mode to use for the stream.
   */
  jsonMode?:
    | "MONGODB_JSON_MODE_UNSPECIFIED"
    | "STRICT"
    | "CANONICAL";
  /**
   * Optional. Maximum number of concurrent backfill tasks. The number should
   * be non-negative and less than or equal to 50. If not set (or set to 0), the
   * system's default value is used
   */
  maxConcurrentBackfillTasks?: number;
}

/**
 * MongoDB SSL configuration information.
 */
export interface MongodbSslConfig {
  /**
   * Optional. Input only. PEM-encoded certificate of the CA that signed the
   * source database server's certificate.
   */
  caCertificate?: string;
  /**
   * Output only. Indicates whether the ca_certificate field is set.
   */
  readonly caCertificateSet?: boolean;
  /**
   * Optional. Input only. PEM-encoded certificate that will be used by the
   * replica to authenticate against the source database server. If this field
   * is used then the 'client_key' and the 'ca_certificate' fields are
   * mandatory.
   */
  clientCertificate?: string;
  /**
   * Output only. Indicates whether the client_certificate field is set.
*/ readonly clientCertificateSet?: boolean; /** * Optional. Input only. PEM-encoded private key associated with the Client * Certificate. If this field is used then the 'client_certificate' and the * 'ca_certificate' fields are mandatory. */ clientKey?: string; /** * Output only. Indicates whether the client_key field is set. */ readonly clientKeySet?: boolean; /** * Optional. Input only. A reference to a Secret Manager resource name * storing the PEM-encoded private key associated with the Client Certificate. * If this field is used then the 'client_certificate' and the * 'ca_certificate' fields are mandatory. Mutually exclusive with the * `client_key` field. */ secretManagerStoredClientKey?: string; } /** * CDC strategy to start replicating from the most recent position in the * source. */ export interface MostRecentStartPosition { } /** * MySQL Column. */ export interface MysqlColumn { /** * Column collation. */ collation?: string; /** * The column name. */ column?: string; /** * The MySQL data type. Full data types list can be found here: * https://dev.mysql.com/doc/refman/8.0/en/data-types.html */ dataType?: string; /** * Column length. */ length?: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Column precision. */ precision?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; /** * Column scale. */ scale?: number; } /** * MySQL database. */ export interface MysqlDatabase { /** * The database name. */ database?: string; /** * Tables in the database. */ mysqlTables?: MysqlTable[]; } /** * MySQL GTID position */ export interface MysqlGtidPosition { /** * Required. The gtid set to start replication from. */ gtidSet?: string; } /** * MySQL log position */ export interface MysqlLogPosition { /** * Required. The binary log file name. */ logFile?: string; /** * Optional. 
The position within the binary log file. Default is head of * file. */ logPosition?: number; } /** * Mysql data source object identifier. */ export interface MysqlObjectIdentifier { /** * Required. The database name. */ database?: string; /** * Required. The table name. */ table?: string; } /** * Profile for connecting to a MySQL source. */ export interface MysqlProfile { /** * Required. Hostname for the MySQL connection. */ hostname?: string; /** * Optional. Input only. Password for the MySQL connection. Mutually * exclusive with the `secret_manager_stored_password` field. */ password?: string; /** * Port for the MySQL connection, default value is 3306. */ port?: number; /** * Optional. A reference to a Secret Manager resource name storing the MySQL * connection password. Mutually exclusive with the `password` field. */ secretManagerStoredPassword?: string; /** * SSL configuration for the MySQL connection. */ sslConfig?: MysqlSslConfig; /** * Required. Username for the MySQL connection. */ username?: string; } /** * MySQL database structure */ export interface MysqlRdbms { /** * Mysql databases on the server */ mysqlDatabases?: MysqlDatabase[]; } /** * Configuration for syncing data from a MySQL source. */ export interface MysqlSourceConfig { /** * Use Binary log position based replication. */ binaryLogPosition?: BinaryLogPosition; /** * The MySQL objects to exclude from the stream. */ excludeObjects?: MysqlRdbms; /** * Use GTID based replication. */ gtid?: Gtid; /** * The MySQL objects to retrieve from the source. */ includeObjects?: MysqlRdbms; /** * Maximum number of concurrent backfill tasks. The number should be non * negative. If not set (or set to 0), the system's default value will be * used. */ maxConcurrentBackfillTasks?: number; /** * Maximum number of concurrent CDC tasks. The number should be non negative. * If not set (or set to 0), the system's default value will be used. 
*/ maxConcurrentCdcTasks?: number; } /** * MySQL SSL configuration information. */ export interface MysqlSslConfig { /** * Input only. PEM-encoded certificate of the CA that signed the source * database server's certificate. */ caCertificate?: string; /** * Output only. Indicates whether the ca_certificate field is set. */ readonly caCertificateSet?: boolean; /** * Optional. Input only. PEM-encoded certificate that will be used by the * replica to authenticate against the source database server. If this field * is used then the 'client_key' and the 'ca_certificate' fields are * mandatory. */ clientCertificate?: string; /** * Output only. Indicates whether the client_certificate field is set. */ readonly clientCertificateSet?: boolean; /** * Optional. Input only. PEM-encoded private key associated with the Client * Certificate. If this field is used then the 'client_certificate' and the * 'ca_certificate' fields are mandatory. */ clientKey?: string; /** * Output only. Indicates whether the client_key field is set. */ readonly clientKeySet?: boolean; } /** * MySQL table. */ export interface MysqlTable { /** * MySQL columns in the database. When unspecified as part of include/exclude * objects, includes/excludes everything. */ mysqlColumns?: MysqlColumn[]; /** * The table name. */ table?: string; } /** * CDC strategy to resume replication from the next available position in the * source. */ export interface NextAvailableStartPosition { } /** * OAuth2 Client Credentials. */ export interface Oauth2ClientCredentials { /** * Required. Client ID for Salesforce OAuth2 Client Credentials. */ clientId?: string; /** * Optional. Client secret for Salesforce OAuth2 Client Credentials. Mutually * exclusive with the `secret_manager_stored_client_secret` field. */ clientSecret?: string; /** * Optional. A reference to a Secret Manager resource name storing the * Salesforce OAuth2 client_secret. Mutually exclusive with the * `client_secret` field. 
*/ secretManagerStoredClientSecret?: string; } /** * Object filter to apply the rules to. */ export interface ObjectFilter { /** * Specific source object identifier. */ sourceObjectIdentifier?: SourceObjectIdentifier; } /** * This resource represents a long-running operation that is the result of a * network API call. */ export interface Operation { /** * If the value is `false`, it means the operation is still in progress. If * `true`, the operation is completed, and either `error` or `response` is * available. */ done?: boolean; /** * The error result of the operation in case of failure or cancellation. */ error?: Status; /** * Service-specific metadata associated with the operation. It typically * contains progress information and common metadata such as create time. Some * services might not provide such metadata. Any method that returns a * long-running operation should document the metadata type, if any. */ metadata?: { [key: string]: any }; /** * The server-assigned name, which is only unique within the same service * that originally returns it. If you use the default HTTP mapping, the `name` * should be a resource name ending with `operations/{unique_id}`. */ name?: string; /** * The normal, successful response of the operation. If the original method * returns no data on success, such as `Delete`, the response is * `google.protobuf.Empty`. If the original method is standard * `Get`/`Create`/`Update`, the response should be the resource. For other * methods, the response should have the type `XxxResponse`, where `Xxx` is * the original method name. For example, if the original method name is * `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. */ response?: { [key: string]: any }; } /** * Represents the metadata of the long-running operation. */ export interface OperationMetadata { /** * Output only. API version used to start the operation. */ readonly apiVersion?: string; /** * Output only. The time the operation was created. 
*/ readonly createTime?: Date; /** * Output only. The time the operation finished running. */ readonly endTime?: Date; /** * Output only. Identifies whether the user has requested cancellation of the * operation. Operations that have successfully been cancelled have * google.longrunning.Operation.error value with a google.rpc.Status.code of * 1, corresponding to `Code.CANCELLED`. */ readonly requestedCancellation?: boolean; /** * Output only. Human-readable status of the operation, if any. */ readonly statusMessage?: string; /** * Output only. Server-defined resource path for the target of the operation. */ readonly target?: string; /** * Output only. Results of executed validations if there are any. */ readonly validationResult?: ValidationResult; /** * Output only. Name of the verb executed by the operation. */ readonly verb?: string; } /** * Configuration for Oracle Automatic Storage Management (ASM) connection. */ export interface OracleAsmConfig { /** * Required. ASM service name for the Oracle ASM connection. */ asmService?: string; /** * Optional. Connection string attributes */ connectionAttributes?: { [key: string]: string }; /** * Required. Hostname for the Oracle ASM connection. */ hostname?: string; /** * Optional. SSL configuration for the Oracle connection. */ oracleSslConfig?: OracleSslConfig; /** * Optional. Password for the Oracle ASM connection. Mutually exclusive with * the `secret_manager_stored_password` field. */ password?: string; /** * Required. Port for the Oracle ASM connection. */ port?: number; /** * Optional. A reference to a Secret Manager resource name storing the Oracle * ASM connection password. Mutually exclusive with the `password` field. */ secretManagerStoredPassword?: string; /** * Required. Username for the Oracle ASM connection. */ username?: string; } /** * Configuration to use Oracle ASM to access the log files. */ export interface OracleAsmLogFileAccess { } /** * Oracle Column. 
*/ export interface OracleColumn { /** * The column name. */ column?: string; /** * The Oracle data type. */ dataType?: string; /** * Column encoding. */ encoding?: string; /** * Column length. */ length?: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Column precision. */ precision?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; /** * Column scale. */ scale?: number; } /** * Oracle data source object identifier. */ export interface OracleObjectIdentifier { /** * Required. The schema name. */ schema?: string; /** * Required. The table name. */ table?: string; } /** * Profile for connecting to an Oracle source. */ export interface OracleProfile { /** * Connection string attributes */ connectionAttributes?: { [key: string]: string }; /** * Required. Database for the Oracle connection. */ databaseService?: string; /** * Required. Hostname for the Oracle connection. */ hostname?: string; /** * Optional. Configuration for Oracle ASM connection. */ oracleAsmConfig?: OracleAsmConfig; /** * Optional. SSL configuration for the Oracle connection. */ oracleSslConfig?: OracleSslConfig; /** * Optional. Password for the Oracle connection. Mutually exclusive with the * `secret_manager_stored_password` field. */ password?: string; /** * Port for the Oracle connection, default value is 1521. */ port?: number; /** * Optional. A reference to a Secret Manager resource name storing the Oracle * connection password. Mutually exclusive with the `password` field. */ secretManagerStoredPassword?: string; /** * Required. Username for the Oracle connection. */ username?: string; } /** * Oracle database structure. */ export interface OracleRdbms { /** * Oracle schemas/databases in the database server. */ oracleSchemas?: OracleSchema[]; } /** * Oracle schema. */ export interface OracleSchema { /** * Tables in the schema. 
*/ oracleTables?: OracleTable[]; /** * The schema name. */ schema?: string; } /** * Oracle SCN position */ export interface OracleScnPosition { /** * Required. SCN number from where Logs will be read */ scn?: bigint; } function serializeOracleScnPosition(data: any): OracleScnPosition { return { ...data, scn: data["scn"] !== undefined ? String(data["scn"]) : undefined, }; } function deserializeOracleScnPosition(data: any): OracleScnPosition { return { ...data, scn: data["scn"] !== undefined ? BigInt(data["scn"]) : undefined, }; } /** * Configuration for syncing data from an Oracle source. */ export interface OracleSourceConfig { /** * Use Binary Log Parser. */ binaryLogParser?: BinaryLogParser; /** * Drop large object values. */ dropLargeObjects?: DropLargeObjects; /** * The Oracle objects to exclude from the stream. */ excludeObjects?: OracleRdbms; /** * The Oracle objects to include in the stream. */ includeObjects?: OracleRdbms; /** * Use LogMiner. */ logMiner?: LogMiner; /** * Maximum number of concurrent backfill tasks. The number should be * non-negative. If not set (or set to 0), the system's default value is used. */ maxConcurrentBackfillTasks?: number; /** * Maximum number of concurrent CDC tasks. The number should be non-negative. * If not set (or set to 0), the system's default value is used. */ maxConcurrentCdcTasks?: number; /** * Stream large object values. */ streamLargeObjects?: StreamLargeObjects; } /** * Oracle SSL configuration information. */ export interface OracleSslConfig { /** * Input only. PEM-encoded certificate of the CA that signed the source * database server's certificate. */ caCertificate?: string; /** * Output only. Indicates whether the ca_certificate field has been set for * this Connection-Profile. */ readonly caCertificateSet?: boolean; /** * Optional. The distinguished name (DN) mentioned in the server certificate. * This corresponds to SSL_SERVER_CERT_DN sqlnet parameter. 
Refer * https://docs.oracle.com/en/database/oracle/oracle-database/19/netrf/local-naming-parameters-in-tns-ora-file.html#GUID-70AB0695-A9AA-4A94-B141-4C605236EEB7 * If this field is not provided, the DN matching is not enforced. */ serverCertificateDistinguishedName?: string; } /** * Oracle table. */ export interface OracleTable { /** * Oracle columns in the schema. When unspecified as part of include/exclude * objects, includes/excludes everything. */ oracleColumns?: OracleColumn[]; /** * The table name. */ table?: string; } /** * PostgreSQL Column. */ export interface PostgresqlColumn { /** * The column name. */ column?: string; /** * The PostgreSQL data type. */ dataType?: string; /** * Column length. */ length?: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Column precision. */ precision?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; /** * Column scale. */ scale?: number; } /** * PostgreSQL data source object identifier. */ export interface PostgresqlObjectIdentifier { /** * Required. The schema name. */ schema?: string; /** * Required. The table name. */ table?: string; } /** * Profile for connecting to a PostgreSQL source. */ export interface PostgresqlProfile { /** * Required. Database for the PostgreSQL connection. */ database?: string; /** * Required. Hostname for the PostgreSQL connection. */ hostname?: string; /** * Optional. Password for the PostgreSQL connection. Mutually exclusive with * the `secret_manager_stored_password` field. */ password?: string; /** * Port for the PostgreSQL connection, default value is 5432. */ port?: number; /** * Optional. A reference to a Secret Manager resource name storing the * PostgreSQL connection password. Mutually exclusive with the `password` * field. */ secretManagerStoredPassword?: string; /** * Optional. 
SSL configuration for the PostgreSQL connection. In case * PostgresqlSslConfig is not set, the connection will use the default SSL * mode, which is `prefer` (i.e. this mode will only use encryption if enabled * from database side, otherwise will use unencrypted communication) */ sslConfig?: PostgresqlSslConfig; /** * Required. Username for the PostgreSQL connection. */ username?: string; } /** * PostgreSQL database structure. */ export interface PostgresqlRdbms { /** * PostgreSQL schemas in the database server. */ postgresqlSchemas?: PostgresqlSchema[]; } /** * PostgreSQL schema. */ export interface PostgresqlSchema { /** * Tables in the schema. */ postgresqlTables?: PostgresqlTable[]; /** * The schema name. */ schema?: string; } /** * Configuration for syncing data from a PostgreSQL source. */ export interface PostgresqlSourceConfig { /** * The PostgreSQL objects to exclude from the stream. */ excludeObjects?: PostgresqlRdbms; /** * The PostgreSQL objects to include in the stream. */ includeObjects?: PostgresqlRdbms; /** * Maximum number of concurrent backfill tasks. The number should be non * negative. If not set (or set to 0), the system's default value will be * used. */ maxConcurrentBackfillTasks?: number; /** * Required. The name of the publication that includes the set of all tables * that are defined in the stream's include_objects. */ publication?: string; /** * Required. Immutable. The name of the logical replication slot that's * configured with the pgoutput plugin. */ replicationSlot?: string; } /** * PostgreSQL SSL configuration information. */ export interface PostgresqlSslConfig { /** * If this field is set, the communication will be encrypted with TLS * encryption and both the server identity and the client identity will be * authenticated. */ serverAndClientVerification?: ServerAndClientVerification; /** * If this field is set, the communication will be encrypted with TLS * encryption and the server identity will be authenticated. 
*/ serverVerification?: ServerVerification; } /** * PostgreSQL table. */ export interface PostgresqlTable { /** * PostgreSQL columns in the schema. When unspecified as part of * include/exclude objects, includes/excludes everything. */ postgresqlColumns?: PostgresqlColumn[]; /** * The table name. */ table?: string; } /** * Describes what preconditions have failed. For example, if an RPC failed * because it required the Terms of Service to be acknowledged, it could list * the terms of service violation in the PreconditionFailure message. */ export interface PreconditionFailure { /** * Describes all precondition violations. */ violations?: PreconditionFailureViolation[]; } /** * A message type used to describe a single precondition failure. */ export interface PreconditionFailureViolation { /** * A description of how the precondition failed. Developers can use this * description to understand how to fix the failure. For example: "Terms of * service not accepted". */ description?: string; /** * The subject, relative to the type, that failed. For example, * "google.com/cloud" relative to the "TOS" type would indicate which terms of * service is being referenced. */ subject?: string; /** * The type of PreconditionFailure. We recommend using a service-specific * enum type to define the supported precondition violation subjects. For * example, "TOS" for "Terms of Service violation". */ type?: string; } /** * The PrivateConnection resource is used to establish private connectivity * between Datastream and a customer's network. */ export interface PrivateConnection { /** * Output only. The create time of the resource. */ readonly createTime?: Date; /** * Required. Display name. */ displayName?: string; /** * Output only. In case of error, the details of the error in a user-friendly * format. */ readonly error?: Error; /** * Labels. */ labels?: { [key: string]: string }; /** * Output only. Identifier. The resource's name. 
*/ readonly name?: string; /** * PSC Interface Config. */ pscInterfaceConfig?: PscInterfaceConfig; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. The state of the Private Connection. */ readonly state?: | "STATE_UNSPECIFIED" | "CREATING" | "CREATED" | "FAILED" | "DELETING" | "FAILED_TO_DELETE"; /** * Output only. The update time of the resource. */ readonly updateTime?: Date; /** * VPC Peering Config. */ vpcPeeringConfig?: VpcPeeringConfig; } /** * Private Connectivity */ export interface PrivateConnectivity { /** * Required. A reference to a private connection resource. Format: * `projects/{project}/locations/{location}/privateConnections/{name}` */ privateConnection?: string; } /** * Additional options for Datastream#projectsLocationsConnectionProfilesCreate. */ export interface ProjectsLocationsConnectionProfilesCreateOptions { /** * Required. The connection profile identifier. */ connectionProfileId?: string; /** * Optional. Create the connection profile without validating it. */ force?: boolean; /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes since the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; /** * Optional. 
Only validate the connection profile, but don't create any * resources. The default is false. */ validateOnly?: boolean; } /** * Additional options for Datastream#projectsLocationsConnectionProfilesDelete. */ export interface ProjectsLocationsConnectionProfilesDeleteOptions { /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes after the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; } /** * Additional options for Datastream#projectsLocationsConnectionProfilesList. */ export interface ProjectsLocationsConnectionProfilesListOptions { /** * Optional. Filter request. */ filter?: string; /** * Optional. Order by fields for the result. */ orderBy?: string; /** * Optional. Maximum number of connection profiles to return. If unspecified, * at most 50 connection profiles will be returned. The maximum value is 1000; * values above 1000 will be coerced to 1000. */ pageSize?: number; /** * Optional. Page token received from a previous `ListConnectionProfiles` * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to `ListConnectionProfiles` must match the call * that provided the page token. */ pageToken?: string; } /** * Additional options for Datastream#projectsLocationsConnectionProfilesPatch. 
*/ export interface ProjectsLocationsConnectionProfilesPatchOptions { /** * Optional. Update the connection profile without validating it. */ force?: boolean; /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes since the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; /** * Optional. Field mask is used to specify the fields to be overwritten in * the ConnectionProfile resource by the update. The fields specified in the * update_mask are relative to the resource, not the full request. A field * will be overwritten if it is in the mask. If the user does not provide a * mask then all fields will be overwritten. */ updateMask?: string /* FieldMask */; /** * Optional. Only validate the connection profile, but don't update any * resources. The default is false. */ validateOnly?: boolean; } function serializeProjectsLocationsConnectionProfilesPatchOptions(data: any): ProjectsLocationsConnectionProfilesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsConnectionProfilesPatchOptions(data: any): ProjectsLocationsConnectionProfilesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? 
data["updateMask"] : undefined, }; } /** * Additional options for Datastream#projectsLocationsFetchStaticIps. */ export interface ProjectsLocationsFetchStaticIpsOptions { /** * Optional. Maximum number of Ips to return, will likely not be specified. */ pageSize?: number; /** * Optional. A page token, received from a previous `ListStaticIps` call. * will likely not be specified. */ pageToken?: string; } /** * Additional options for Datastream#projectsLocationsList. */ export interface ProjectsLocationsListOptions { /** * Optional. Do not use this field. It is unsupported and is ignored unless * explicitly documented otherwise. This is primarily for internal usage. */ extraLocationTypes?: string; /** * A filter to narrow down results to a preferred subset. The filtering * language accepts strings like `"displayName=tokyo"`, and is documented in * more detail in [AIP-160](https://google.aip.dev/160). */ filter?: string; /** * The maximum number of results to return. If not set, the service selects a * default. */ pageSize?: number; /** * A page token received from the `next_page_token` field in the response. * Send that page token to receive the subsequent page. */ pageToken?: string; } /** * Additional options for Datastream#projectsLocationsOperationsList. */ export interface ProjectsLocationsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * When set to `true`, operations that are reachable are returned as normal, * and those that are unreachable are returned in the * ListOperationsResponse.unreachable field. This can only be `true` when * reading across collections. For example, when `parent` is set to * `"projects/example/locations/-"`. This field is not supported by default * and will result in an `UNIMPLEMENTED` error if set unless explicitly * documented otherwise in service or product specific documentation. 
*/ returnPartialSuccess?: boolean; } /** * Additional options for Datastream#projectsLocationsPrivateConnectionsCreate. */ export interface ProjectsLocationsPrivateConnectionsCreateOptions { /** * Optional. If set to true, will skip validations. */ force?: boolean; /** * Required. The private connectivity identifier. */ privateConnectionId?: string; /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes since the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; /** * Optional. When supplied with PSC Interface config, will get/create the * tenant project required for the customer to allow list and won't actually * create the private connection. */ validateOnly?: boolean; } /** * Additional options for Datastream#projectsLocationsPrivateConnectionsDelete. */ export interface ProjectsLocationsPrivateConnectionsDeleteOptions { /** * Optional. If set to true, any child routes that belong to this * PrivateConnection will also be deleted. */ force?: boolean; /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes after the first request. 
For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; } /** * Additional options for Datastream#projectsLocationsPrivateConnectionsList. */ export interface ProjectsLocationsPrivateConnectionsListOptions { /** * Optional. Filter request. */ filter?: string; /** * Optional. Order by fields for the result. */ orderBy?: string; /** * Maximum number of private connectivity configurations to return. If * unspecified, at most 50 private connectivity configurations that will be * returned. The maximum value is 1000; values above 1000 will be coerced to * 1000. */ pageSize?: number; /** * Optional. Page token received from a previous `ListPrivateConnections` * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to `ListPrivateConnections` must match the call * that provided the page token. */ pageToken?: string; } /** * Additional options for * Datastream#projectsLocationsPrivateConnectionsRoutesCreate. */ export interface ProjectsLocationsPrivateConnectionsRoutesCreateOptions { /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes since the first request. For example, consider a * situation where you make an initial request and the request times out. 
If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; /** * Required. The Route identifier. */ routeId?: string; } /** * Additional options for * Datastream#projectsLocationsPrivateConnectionsRoutesDelete. */ export interface ProjectsLocationsPrivateConnectionsRoutesDeleteOptions { /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes after the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; } /** * Additional options for * Datastream#projectsLocationsPrivateConnectionsRoutesList. */ export interface ProjectsLocationsPrivateConnectionsRoutesListOptions { /** * Optional. Filter request. */ filter?: string; /** * Optional. Order by fields for the result. */ orderBy?: string; /** * Optional. Maximum number of Routes to return. The service may return fewer * than this value. If unspecified, at most 50 Routes will be returned. The * maximum value is 1000; values above 1000 will be coerced to 1000. */ pageSize?: number; /** * Optional. 
Page token received from a previous `ListRoutes` call. Provide * this to retrieve the subsequent page. When paginating, all other parameters * provided to `ListRoutes` must match the call that provided the page token. */ pageToken?: string; } /** * Additional options for Datastream#projectsLocationsStreamsCreate. */ export interface ProjectsLocationsStreamsCreateOptions { /** * Optional. Create the stream without validating it. */ force?: boolean; /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes since the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; /** * Required. The stream identifier. */ streamId?: string; /** * Optional. Only validate the stream, but don't create any resources. The * default is false. */ validateOnly?: boolean; } /** * Additional options for Datastream#projectsLocationsStreamsDelete. */ export interface ProjectsLocationsStreamsDeleteOptions { /** * Optional. A request ID to identify requests. Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes after the first request. For example, consider a * situation where you make an initial request and the request times out. 
If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; } /** * Additional options for Datastream#projectsLocationsStreamsList. */ export interface ProjectsLocationsStreamsListOptions { /** * Optional. Filter request. */ filter?: string; /** * Optional. Order by fields for the result. */ orderBy?: string; /** * Optional. Maximum number of streams to return. If unspecified, at most 50 * streams will be returned. The maximum value is 1000; values above 1000 will * be coerced to 1000. */ pageSize?: number; /** * Optional. Page token received from a previous `ListStreams` call. Provide * this to retrieve the subsequent page. When paginating, all other parameters * provided to `ListStreams` must match the call that provided the page token. */ pageToken?: string; } /** * Additional options for Datastream#projectsLocationsStreamsObjectsList. */ export interface ProjectsLocationsStreamsObjectsListOptions { /** * Optional. Maximum number of objects to return. Default is 50. The maximum * value is 1000; values above 1000 will be coerced to 1000. */ pageSize?: number; /** * Optional. Page token received from a previous `ListStreamObjectsRequest` * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to `ListStreamObjectsRequest` must match the call * that provided the page token. */ pageToken?: string; } /** * Additional options for Datastream#projectsLocationsStreamsPatch. */ export interface ProjectsLocationsStreamsPatchOptions { /** * Optional. Update the stream without validating it. */ force?: boolean; /** * Optional. A request ID to identify requests. 
Specify a unique request ID * so that if you must retry your request, the server will know to ignore the * request if it has already been completed. The server will guarantee that * for at least 60 minutes since the first request. For example, consider a * situation where you make an initial request and the request times out. If * you make the request again with the same request ID, the server can check * if original operation with the same request ID was received, and if so, * will ignore the second request. This prevents clients from accidentally * creating duplicate commitments. The request ID must be a valid UUID with * the exception that zero UUID is not supported * (00000000-0000-0000-0000-000000000000). */ requestId?: string; /** * Optional. Field mask is used to specify the fields to be overwritten in * the stream resource by the update. The fields specified in the update_mask * are relative to the resource, not the full request. A field will be * overwritten if it is in the mask. If the user does not provide a mask then * all fields will be overwritten. */ updateMask?: string /* FieldMask */; /** * Optional. Only validate the stream with the changes, without actually * updating it. The default is false. */ validateOnly?: boolean; } function serializeProjectsLocationsStreamsPatchOptions(data: any): ProjectsLocationsStreamsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsStreamsPatchOptions(data: any): ProjectsLocationsStreamsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * The PSC Interface configuration is used to create PSC Interface between * Datastream and the consumer's PSC. */ export interface PscInterfaceConfig { /** * Required. Fully qualified name of the Network Attachment that Datastream * will connect to. 
Format: * `projects/{project}/regions/{region}/networkAttachments/{name}` */ networkAttachment?: string; } /** * Describes how a quota check failed. For example if a daily limit was * exceeded for the calling project, a service could respond with a QuotaFailure * detail containing the project id and the description of the quota limit that * was exceeded. If the calling project hasn't enabled the service in the * developer console, then a service could respond with the project id and set * `service_disabled` to true. Also see RetryInfo and Help types for other * details about handling a quota failure. */ export interface QuotaFailure { /** * Describes all quota violations. */ violations?: QuotaFailureViolation[]; } function serializeQuotaFailure(data: any): QuotaFailure { return { ...data, violations: data["violations"] !== undefined ? data["violations"].map((item: any) => (serializeQuotaFailureViolation(item))) : undefined, }; } function deserializeQuotaFailure(data: any): QuotaFailure { return { ...data, violations: data["violations"] !== undefined ? data["violations"].map((item: any) => (deserializeQuotaFailureViolation(item))) : undefined, }; } /** * A message type used to describe a single quota violation. For example, a * daily quota or a custom quota that was exceeded. */ export interface QuotaFailureViolation { /** * The API Service from which the `QuotaFailure.Violation` orginates. In some * cases, Quota issues originate from an API Service other than the one that * was called. In other words, a dependency of the called API Service could be * the cause of the `QuotaFailure`, and this field would have the dependency * API service name. For example, if the called API is Kubernetes Engine API * (container.googleapis.com), and a quota violation occurs in the Kubernetes * Engine API itself, this field would be "container.googleapis.com". 
On the * other hand, if the quota violation occurs when the Kubernetes Engine API * creates VMs in the Compute Engine API (compute.googleapis.com), this field * would be "compute.googleapis.com". */ apiService?: string; /** * A description of how the quota check failed. Clients can use this * description to find more about the quota configuration in the service's * public documentation, or find the relevant quota limit to adjust through * developer console. For example: "Service disabled" or "Daily Limit for read * operations exceeded". */ description?: string; /** * The new quota value being rolled out at the time of the violation. At the * completion of the rollout, this value will be enforced in place of * quota_value. If no rollout is in progress at the time of the violation, * this field is not set. For example, if at the time of the violation a * rollout is in progress changing the number of CPUs quota from 10 to 20, 20 * would be the value of this field. */ futureQuotaValue?: bigint; /** * The dimensions of the violated quota. Every non-global quota is enforced * on a set of dimensions. While quota metric defines what to count, the * dimensions specify for what aspects the counter should be increased. For * example, the quota "CPUs per region per VM family" enforces a limit on the * metric "compute.googleapis.com/cpus_per_vm_family" on dimensions "region" * and "vm_family". And if the violation occurred in region "us-central1" and * for VM family "n1", the quota_dimensions would be, { "region": * "us-central1", "vm_family": "n1", } When a quota is enforced globally, the * quota_dimensions would always be empty. */ quotaDimensions?: { [key: string]: string }; /** * The id of the violated quota. Also known as "limit name", this is the * unique identifier of a quota in the context of an API service. For example, * "CPUS-PER-VM-FAMILY-per-project-region". */ quotaId?: string; /** * The metric of the violated quota. 
A quota metric is a named counter to * measure usage, such as API requests or CPUs. When an activity occurs in a * service, such as Virtual Machine allocation, one or more quota metrics may * be affected. For example, "compute.googleapis.com/cpus_per_vm_family", * "storage.googleapis.com/internet_egress_bandwidth". */ quotaMetric?: string; /** * The enforced quota value at the time of the `QuotaFailure`. For example, * if the enforced quota value at the time of the `QuotaFailure` on the number * of CPUs is "10", then the value of this field would reflect this quantity. */ quotaValue?: bigint; /** * The subject on which the quota check failed. For example, "clientip:" or * "project:". */ subject?: string; } function serializeQuotaFailureViolation(data: any): QuotaFailureViolation { return { ...data, futureQuotaValue: data["futureQuotaValue"] !== undefined ? String(data["futureQuotaValue"]) : undefined, quotaValue: data["quotaValue"] !== undefined ? String(data["quotaValue"]) : undefined, }; } function deserializeQuotaFailureViolation(data: any): QuotaFailureViolation { return { ...data, futureQuotaValue: data["futureQuotaValue"] !== undefined ? BigInt(data["futureQuotaValue"]) : undefined, quotaValue: data["quotaValue"] !== undefined ? BigInt(data["quotaValue"]) : undefined, }; } /** * Contains metadata about the request that clients can attach when filing a * bug or providing other forms of feedback. */ export interface RequestInfo { /** * An opaque string that should only be interpreted by the service generating * it. For example, it can be used to identify requests in the service's logs. */ requestId?: string; /** * Any data that was used to serve this request. For example, an encrypted * stack trace that can be sent back to the service provider for debugging. */ servingData?: string; } /** * Describes the resource that is being accessed. */ export interface ResourceInfo { /** * Describes what error is encountered when accessing this resource. 
For * example, updating a cloud project may require the `writer` permission on * the developer console project. */ description?: string; /** * The owner of the resource (optional). For example, "user:" or "project:". */ owner?: string; /** * The name of the resource being accessed. For example, a shared calendar * name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current * error is google.rpc.Code.PERMISSION_DENIED. */ resourceName?: string; /** * A name for the type of resource being accessed, e.g. "sql table", "cloud * storage bucket", "file", "Google calendar"; or the type URL of the * resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". */ resourceType?: string; } /** * Describes when the clients can retry a failed request. Clients could ignore * the recommendation here or retry when this information is missing from error * responses. It's always recommended that clients should use exponential * backoff when retrying. Clients should wait until `retry_delay` amount of time * has passed since receiving the error response before retrying. If retrying * requests also fail, clients should use an exponential backoff scheme to * gradually increase the delay between retries based on `retry_delay`, until * either a maximum number of retries have been reached or a maximum retry delay * cap has been reached. */ export interface RetryInfo { /** * Clients should wait at least this long between retrying the same request. */ retryDelay?: number /* Duration */; } /** The Duration-typed retryDelay is currently passed through unchanged; kept for generated-code symmetry. */ function serializeRetryInfo(data: any): RetryInfo { return { ...data, retryDelay: data["retryDelay"] !== undefined ? data["retryDelay"] : undefined, }; } /** Inverse of serializeRetryInfo; likewise a pass-through. */ function deserializeRetryInfo(data: any): RetryInfo { return { ...data, retryDelay: data["retryDelay"] !== undefined ? data["retryDelay"] : undefined, }; } /** * The route resource is the child of the private connection resource, used for * defining a route for a private connection. */ export interface Route { /** * Output only. 
The create time of the resource. */ readonly createTime?: Date; /** * Required. Destination address for connection */ destinationAddress?: string; /** * Destination port for connection */ destinationPort?: number; /** * Required. Display name. */ displayName?: string; /** * Labels. */ labels?: { [key: string]: string }; /** * Output only. Identifier. The resource's name. */ readonly name?: string; /** * Output only. The update time of the resource. */ readonly updateTime?: Date; } /** * A set of rules to apply to a set of objects. */ export interface RuleSet { /** * Required. List of customization rules to apply. */ customizationRules?: CustomizationRule[]; /** * Required. Object filter to apply the customization rules to. */ objectFilter?: ObjectFilter; } function serializeRuleSet(data: any): RuleSet { return { ...data, customizationRules: data["customizationRules"] !== undefined ? data["customizationRules"].map((item: any) => (serializeCustomizationRule(item))) : undefined, }; } function deserializeRuleSet(data: any): RuleSet { return { ...data, customizationRules: data["customizationRules"] !== undefined ? data["customizationRules"].map((item: any) => (deserializeCustomizationRule(item))) : undefined, }; } /** * Request message for running a stream. */ export interface RunStreamRequest { /** * Optional. The CDC strategy of the stream. If not set, the system's default * value will be used. */ cdcStrategy?: CdcStrategy; /** * Optional. Update the stream without validating it. */ force?: boolean; } function serializeRunStreamRequest(data: any): RunStreamRequest { return { ...data, cdcStrategy: data["cdcStrategy"] !== undefined ? serializeCdcStrategy(data["cdcStrategy"]) : undefined, }; } function deserializeRunStreamRequest(data: any): RunStreamRequest { return { ...data, cdcStrategy: data["cdcStrategy"] !== undefined ? deserializeCdcStrategy(data["cdcStrategy"]) : undefined, }; } /** * Salesforce field. */ export interface SalesforceField { /** * The data type. 
*/ dataType?: string; /** * The field name. */ name?: string; /** * Indicates whether the field can accept nil values. */ nillable?: boolean; } /** * Salesforce object. */ export interface SalesforceObject { /** * Salesforce fields. When unspecified as part of include objects, includes * everything, when unspecified as part of exclude objects, excludes nothing. */ fields?: SalesforceField[]; /** * The object name. */ objectName?: string; } /** * Salesforce data source object identifier. */ export interface SalesforceObjectIdentifier { /** * Required. The object name. */ objectName?: string; } /** * Salesforce organization structure. */ export interface SalesforceOrg { /** * Salesforce objects in the database server. */ objects?: SalesforceObject[]; } /** * Profile for connecting to a Salesforce source. */ export interface SalesforceProfile { /** * Required. Domain endpoint for the Salesforce connection. */ domain?: string; /** * Connected app authentication. */ oauth2ClientCredentials?: Oauth2ClientCredentials; /** * User-password authentication. */ userCredentials?: UserCredentials; } /** * Configuration for syncing data from a Salesforce source. */ export interface SalesforceSourceConfig { /** * The Salesforce objects to exclude from the stream. */ excludeObjects?: SalesforceOrg; /** * The Salesforce objects to retrieve from the source. */ includeObjects?: SalesforceOrg; /** * Required. Salesforce objects polling interval. The interval at which new * changes will be polled for each object. The duration must be from `5 * minutes` to `24 hours`, inclusive. */ pollingInterval?: number /* Duration */; } function serializeSalesforceSourceConfig(data: any): SalesforceSourceConfig { return { ...data, pollingInterval: data["pollingInterval"] !== undefined ? data["pollingInterval"] : undefined, }; } function deserializeSalesforceSourceConfig(data: any): SalesforceSourceConfig { return { ...data, pollingInterval: data["pollingInterval"] !== undefined ? 
data["pollingInterval"] : undefined, }; } /** * Message represents the option where Datastream will enforce the encryption * and authenticate the server identity as well as the client identity. * ca_certificate, client_certificate and client_key must be set if user selects * this option. */ export interface ServerAndClientVerification { /** * Required. Input only. PEM-encoded server root CA certificate. */ caCertificate?: string; /** * Required. Input only. PEM-encoded certificate used by the source database * to authenticate the client identity (i.e., the Datastream's identity). This * certificate is signed by either a root certificate trusted by the server or * one or more intermediate certificates (which is stored with the leaf * certificate) to link this certificate to the trusted root certificate. */ clientCertificate?: string; /** * Optional. Input only. PEM-encoded private key associated with the client * certificate. This value will be used during the SSL/TLS handshake, allowing * the PostgreSQL server to authenticate the client's identity, i.e. identity * of the Datastream. */ clientKey?: string; /** * Optional. The hostname mentioned in the Subject or SAN extension of the * server certificate. If this field is not provided, the hostname in the * server certificate is not validated. */ serverCertificateHostname?: string; } /** * Message represents the option where Datastream will enforce the encryption * and authenticate the server identity. ca_certificate must be set if user * selects this option. */ export interface ServerVerification { /** * Required. Input only. PEM-encoded server root CA certificate. */ caCertificate?: string; /** * Optional. The hostname mentioned in the Subject or SAN extension of the * server certificate. If this field is not provided, the hostname in the * server certificate is not validated. */ serverCertificateHostname?: string; } /** * A single target dataset to which all data will be streamed. 
*/ export interface SingleTargetDataset { /** * The dataset ID of the target dataset. DatasetIds allowed characters: * https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference. */ datasetId?: string; } /** * The configuration of the stream source. */ export interface SourceConfig { /** * MongoDB data source configuration. */ mongodbSourceConfig?: MongodbSourceConfig; /** * MySQL data source configuration. */ mysqlSourceConfig?: MysqlSourceConfig; /** * Oracle data source configuration. */ oracleSourceConfig?: OracleSourceConfig; /** * PostgreSQL data source configuration. */ postgresqlSourceConfig?: PostgresqlSourceConfig; /** * Salesforce data source configuration. */ salesforceSourceConfig?: SalesforceSourceConfig; /** * Required. Source connection profile resource. Format: * `projects/{project}/locations/{location}/connectionProfiles/{name}` */ sourceConnectionProfile?: string; /** * Spanner data source configuration. */ spannerSourceConfig?: SpannerSourceConfig; /** * SQLServer data source configuration. */ sqlServerSourceConfig?: SqlServerSourceConfig; } /** Only the Salesforce and Spanner sub-configs have dedicated (de)serializers here; all other fields are copied as-is by the spread. */ function serializeSourceConfig(data: any): SourceConfig { return { ...data, salesforceSourceConfig: data["salesforceSourceConfig"] !== undefined ? serializeSalesforceSourceConfig(data["salesforceSourceConfig"]) : undefined, spannerSourceConfig: data["spannerSourceConfig"] !== undefined ? serializeSpannerSourceConfig(data["spannerSourceConfig"]) : undefined, }; } function deserializeSourceConfig(data: any): SourceConfig { return { ...data, salesforceSourceConfig: data["salesforceSourceConfig"] !== undefined ? deserializeSalesforceSourceConfig(data["salesforceSourceConfig"]) : undefined, spannerSourceConfig: data["spannerSourceConfig"] !== undefined ? deserializeSpannerSourceConfig(data["spannerSourceConfig"]) : undefined, }; } /** * Destination datasets are created so that hierarchy of the destination data * objects matches the source hierarchy. 
*/ export interface SourceHierarchyDatasets { /** * The dataset template to use for dynamic dataset creation. */ datasetTemplate?: DatasetTemplate; /** * Optional. The project id of the BigQuery dataset. If not specified, the * project will be inferred from the stream resource. */ projectId?: string; } /** * Represents an identifier of an object in the data source. */ export interface SourceObjectIdentifier { /** * MongoDB data source object identifier. */ mongodbIdentifier?: MongodbObjectIdentifier; /** * Mysql data source object identifier. */ mysqlIdentifier?: MysqlObjectIdentifier; /** * Oracle data source object identifier. */ oracleIdentifier?: OracleObjectIdentifier; /** * PostgreSQL data source object identifier. */ postgresqlIdentifier?: PostgresqlObjectIdentifier; /** * Salesforce data source object identifier. */ salesforceIdentifier?: SalesforceObjectIdentifier; /** * Spanner data source object identifier. */ spannerIdentifier?: SpannerObjectIdentifier; /** * SQLServer data source object identifier. */ sqlServerIdentifier?: SqlServerObjectIdentifier; } /** * Spanner column. */ export interface SpannerColumn { /** * Required. The column name. */ column?: string; /** * Optional. Spanner data type. */ dataType?: string; /** * Optional. Whether or not the column is a primary key. */ isPrimaryKey?: boolean; /** * Optional. The ordinal position of the column in the table. */ ordinalPosition?: bigint; } function serializeSpannerColumn(data: any): SpannerColumn { return { ...data, ordinalPosition: data["ordinalPosition"] !== undefined ? String(data["ordinalPosition"]) : undefined, }; } function deserializeSpannerColumn(data: any): SpannerColumn { return { ...data, ordinalPosition: data["ordinalPosition"] !== undefined ? BigInt(data["ordinalPosition"]) : undefined, }; } /** * Spanner database structure. */ export interface SpannerDatabase { /** * Optional. Spanner schemas in the database. 
*/ schemas?: SpannerSchema[]; } function serializeSpannerDatabase(data: any): SpannerDatabase { return { ...data, schemas: data["schemas"] !== undefined ? data["schemas"].map((item: any) => (serializeSpannerSchema(item))) : undefined, }; } function deserializeSpannerDatabase(data: any): SpannerDatabase { return { ...data, schemas: data["schemas"] !== undefined ? data["schemas"].map((item: any) => (deserializeSpannerSchema(item))) : undefined, }; } /** * Spanner data source object identifier. */ export interface SpannerObjectIdentifier { /** * Optional. The schema name. */ schema?: string; /** * Required. The table name. */ table?: string; } /** * Profile for connecting to a Spanner source. */ export interface SpannerProfile { /** * Required. Immutable. Cloud Spanner database resource. This field is * immutable. Must be in the format: * projects/{project}/instances/{instance}/databases/{database_id}. */ database?: string; /** * Optional. The Spanner endpoint to connect to. Defaults to the global * endpoint (https://spanner.googleapis.com). Must be in the format: * https://spanner.{region}.rep.googleapis.com. */ host?: string; } /** * Spanner schema. */ export interface SpannerSchema { /** * Required. The schema name. */ schema?: string; /** * Optional. Spanner tables in the schema. */ tables?: SpannerTable[]; } function serializeSpannerSchema(data: any): SpannerSchema { return { ...data, tables: data["tables"] !== undefined ? data["tables"].map((item: any) => (serializeSpannerTable(item))) : undefined, }; } function deserializeSpannerSchema(data: any): SpannerSchema { return { ...data, tables: data["tables"] !== undefined ? data["tables"].map((item: any) => (deserializeSpannerTable(item))) : undefined, }; } /** * Configuration for syncing data from a Spanner source. */ export interface SpannerSourceConfig { /** * Optional. Whether to use Data Boost for Spanner backfills. Defaults to * false if not set. */ backfillDataBoostEnabled?: boolean; /** * Required. 
Immutable. The change stream name to use for the stream. */ changeStreamName?: string; /** * Optional. The Spanner objects to avoid retrieving. If some objects are * both included and excluded, an error will be thrown. */ excludeObjects?: SpannerDatabase; /** * Optional. The FGAC role to use for the stream. */ fgacRole?: string; /** * Optional. The Spanner objects to retrieve from the data source. If some * objects are both included and excluded, an error will be thrown. */ includeObjects?: SpannerDatabase; /** * Optional. Maximum number of concurrent backfill tasks. */ maxConcurrentBackfillTasks?: number; /** * Optional. Maximum number of concurrent CDC tasks. */ maxConcurrentCdcTasks?: number; /** * Optional. The RPC priority to use for the stream. */ spannerRpcPriority?: | "SPANNER_RPC_PRIORITY_UNSPECIFIED" | "LOW" | "MEDIUM" | "HIGH"; } /** include/exclude objects are handled via the SpannerDatabase helpers so nested bigint ordinal positions round-trip as strings. */ function serializeSpannerSourceConfig(data: any): SpannerSourceConfig { return { ...data, excludeObjects: data["excludeObjects"] !== undefined ? serializeSpannerDatabase(data["excludeObjects"]) : undefined, includeObjects: data["includeObjects"] !== undefined ? serializeSpannerDatabase(data["includeObjects"]) : undefined, }; } function deserializeSpannerSourceConfig(data: any): SpannerSourceConfig { return { ...data, excludeObjects: data["excludeObjects"] !== undefined ? deserializeSpannerDatabase(data["excludeObjects"]) : undefined, includeObjects: data["includeObjects"] !== undefined ? deserializeSpannerDatabase(data["includeObjects"]) : undefined, }; } /** * Spanner table. */ export interface SpannerTable { /** * Optional. Spanner columns in the table. */ columns?: SpannerColumn[]; /** * Required. The table name. */ table?: string; } function serializeSpannerTable(data: any): SpannerTable { return { ...data, columns: data["columns"] !== undefined ? 
data["columns"].map((item: any) => (serializeSpannerColumn(item))) : undefined, }; } function deserializeSpannerTable(data: any): SpannerTable { return { ...data, columns: data["columns"] !== undefined ? data["columns"].map((item: any) => (deserializeSpannerColumn(item))) : undefined, }; } /** * CDC strategy to start replicating from a specific position in the source. */ export interface SpecificStartPosition { /** * MongoDB change stream position to start replicating from. */ mongodbChangeStreamPosition?: MongodbChangeStreamPosition; /** * MySQL GTID set to start replicating from. */ mysqlGtidPosition?: MysqlGtidPosition; /** * MySQL specific log position to start replicating from. */ mysqlLogPosition?: MysqlLogPosition; /** * Oracle SCN to start replicating from. */ oracleScnPosition?: OracleScnPosition; /** * SqlServer LSN to start replicating from. */ sqlServerLsnPosition?: SqlServerLsnPosition; } function serializeSpecificStartPosition(data: any): SpecificStartPosition { return { ...data, mongodbChangeStreamPosition: data["mongodbChangeStreamPosition"] !== undefined ? serializeMongodbChangeStreamPosition(data["mongodbChangeStreamPosition"]) : undefined, oracleScnPosition: data["oracleScnPosition"] !== undefined ? serializeOracleScnPosition(data["oracleScnPosition"]) : undefined, }; } function deserializeSpecificStartPosition(data: any): SpecificStartPosition { return { ...data, mongodbChangeStreamPosition: data["mongodbChangeStreamPosition"] !== undefined ? deserializeMongodbChangeStreamPosition(data["mongodbChangeStreamPosition"]) : undefined, oracleScnPosition: data["oracleScnPosition"] !== undefined ? deserializeOracleScnPosition(data["oracleScnPosition"]) : undefined, }; } /** * Configuration to use Change Tables CDC read method. */ export interface SqlServerChangeTables { } /** * SQLServer Column. */ export interface SqlServerColumn { /** * The column name. */ column?: string; /** * The SQLServer data type. */ dataType?: string; /** * Column length. 
*/ length?: number; /** * Whether or not the column can accept a null value. */ nullable?: boolean; /** * The ordinal position of the column in the table. */ ordinalPosition?: number; /** * Column precision. */ precision?: number; /** * Whether or not the column represents a primary key. */ primaryKey?: boolean; /** * Column scale. */ scale?: number; } /** * SQL Server LSN position */ export interface SqlServerLsnPosition { /** * Required. Log sequence number (LSN) from where logs will be read */ lsn?: string; } /** * SQLServer data source object identifier. */ export interface SqlServerObjectIdentifier { /** * Required. The schema name. */ schema?: string; /** * Required. The table name. */ table?: string; } /** * Profile for connecting to a SQLServer source. */ export interface SqlServerProfile { /** * Required. Database for the SQLServer connection. */ database?: string; /** * Required. Hostname for the SQLServer connection. */ hostname?: string; /** * Optional. Password for the SQLServer connection. Mutually exclusive with * the `secret_manager_stored_password` field. */ password?: string; /** * Port for the SQLServer connection, default value is 1433. */ port?: number; /** * Optional. A reference to a Secret Manager resource name storing the * SQLServer connection password. Mutually exclusive with the `password` * field. */ secretManagerStoredPassword?: string; /** * Optional. SSL configuration for the SQLServer connection. */ sslConfig?: SqlServerSslConfig; /** * Required. Username for the SQLServer connection. */ username?: string; } /** * SQLServer database structure. */ export interface SqlServerRdbms { /** * SQLServer schemas in the database server. */ schemas?: SqlServerSchema[]; } /** * SQLServer schema. */ export interface SqlServerSchema { /** * The schema name. */ schema?: string; /** * Tables in the schema. */ tables?: SqlServerTable[]; } /** * Configuration for syncing data from a SQLServer source. 
*/ export interface SqlServerSourceConfig { /** * CDC reader reads from change tables. */ changeTables?: SqlServerChangeTables; /** * The SQLServer objects to exclude from the stream. */ excludeObjects?: SqlServerRdbms; /** * The SQLServer objects to include in the stream. */ includeObjects?: SqlServerRdbms; /** * Max concurrent backfill tasks. */ maxConcurrentBackfillTasks?: number; /** * Max concurrent CDC tasks. */ maxConcurrentCdcTasks?: number; /** * CDC reader reads from transaction logs. */ transactionLogs?: SqlServerTransactionLogs; } /** * SQL Server SSL configuration information. */ export interface SqlServerSslConfig { /** * If set, Datastream will enforce encryption without authenticating server * identity. Server certificates will be trusted by default. */ basicEncryption?: BasicEncryption; /** * If set, Datastream will enforce encryption and authenticate server * identity. */ encryptionAndServerValidation?: EncryptionAndServerValidation; /** * If set, Datastream will not enforce encryption. If the DB server mandates * encryption, then connection will be encrypted but server identity will not * be authenticated. */ encryptionNotEnforced?: EncryptionNotEnforced; } /** * SQLServer table. */ export interface SqlServerTable { /** * SQLServer columns in the schema. When unspecified as part of * include/exclude objects, includes/excludes everything. */ columns?: SqlServerColumn[]; /** * The table name. */ table?: string; } /** * Configuration to use Transaction Logs CDC read method. */ export interface SqlServerTransactionLogs { } /** * Srv connection format. */ export interface SrvConnectionFormat { } /** * Standard connection format. */ export interface StandardConnectionFormat { /** * Optional. Deprecated: Use the `additional_options` map to specify the * `directConnection` parameter instead. For example: `additional_options = * {"directConnection": "true"}`. Specifies whether the client connects * directly to the host[:port] in the connection URI. 
*/ directConnection?: boolean; } /** * Request for manually initiating a backfill job for a specific stream object. */ export interface StartBackfillJobRequest { /** * Optional. Optional event filter. If not set, or empty, the backfill will * be performed on the entire object. This is currently used for partial * backfill and only supported for SQL Server sources. */ eventFilter?: EventFilter; } /** * Response for manually initiating a backfill job for a specific stream * object. */ export interface StartBackfillJobResponse { /** * The stream object resource a backfill job was started for. */ object?: StreamObject; } /** * Static IP address connectivity. Used when the source database is configured * to allow incoming connections from the Datastream public IP addresses for the * region specified in the connection profile. */ export interface StaticServiceIpConnectivity { } /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains three * pieces of data: error code, error message, and error details. You can find * out more about this error model and how to work with it in the [API Design * Guide](https://cloud.google.com/apis/design/errors). */ export interface Status { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. */ details?: { [key: string]: any }[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } /** * Request for manually stopping a running backfill job for a specific stream * object. 
*/ export interface StopBackfillJobRequest { } /** * Response for manually stopping a backfill job for a specific stream object. */ export interface StopBackfillJobResponse { /** * The stream object resource the backfill job was stopped for. */ object?: StreamObject; } /** * A resource representing streaming data from a source to a destination. */ export interface Stream { /** * Automatically backfill objects included in the stream source * configuration. Specific objects can be excluded. */ backfillAll?: BackfillAllStrategy; /** * Do not automatically backfill any objects. */ backfillNone?: BackfillNoneStrategy; /** * Output only. The creation time of the stream. */ readonly createTime?: Date; /** * Immutable. A reference to a KMS encryption key. If provided, it will be * used to encrypt the data. If left blank, data will be encrypted using an * internal Stream-specific encryption key provisioned through KMS. */ customerManagedEncryptionKey?: string; /** * Required. Destination connection profile configuration. */ destinationConfig?: DestinationConfig; /** * Required. Display name. */ displayName?: string; /** * Output only. Errors on the Stream. */ readonly errors?: Error[]; /** * Labels. */ labels?: { [key: string]: string }; /** * Output only. If the stream was recovered, the time of the last recovery. * Note: This field is currently experimental. */ readonly lastRecoveryTime?: Date; /** * Output only. Identifier. The stream's name. */ readonly name?: string; /** * Optional. Rule sets to apply to the stream. */ ruleSets?: RuleSet[]; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Required. Source connection profile configuration. */ sourceConfig?: SourceConfig; /** * The state of the stream. 
*/ state?: | "STATE_UNSPECIFIED" | "NOT_STARTED" | "RUNNING" | "PAUSED" | "MAINTENANCE" | "FAILED" | "FAILED_PERMANENTLY" | "STARTING" | "DRAINING"; /** * Output only. The last update time of the stream. */ readonly updateTime?: Date; } function serializeStream(data: any): Stream { return { ...data, backfillAll: data["backfillAll"] !== undefined ? serializeBackfillAllStrategy(data["backfillAll"]) : undefined, destinationConfig: data["destinationConfig"] !== undefined ? serializeDestinationConfig(data["destinationConfig"]) : undefined, ruleSets: data["ruleSets"] !== undefined ? data["ruleSets"].map((item: any) => (serializeRuleSet(item))) : undefined, sourceConfig: data["sourceConfig"] !== undefined ? serializeSourceConfig(data["sourceConfig"]) : undefined, }; } function deserializeStream(data: any): Stream { return { ...data, backfillAll: data["backfillAll"] !== undefined ? deserializeBackfillAllStrategy(data["backfillAll"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, destinationConfig: data["destinationConfig"] !== undefined ? deserializeDestinationConfig(data["destinationConfig"]) : undefined, errors: data["errors"] !== undefined ? data["errors"].map((item: any) => (deserializeError(item))) : undefined, lastRecoveryTime: data["lastRecoveryTime"] !== undefined ? new Date(data["lastRecoveryTime"]) : undefined, ruleSets: data["ruleSets"] !== undefined ? data["ruleSets"].map((item: any) => (deserializeRuleSet(item))) : undefined, sourceConfig: data["sourceConfig"] !== undefined ? deserializeSourceConfig(data["sourceConfig"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Configuration to stream large object values. */ export interface StreamLargeObjects { } /** * A specific stream object (e.g a specific DB table). */ export interface StreamObject { /** * The latest backfill job that was initiated for the stream object. 
*/ backfillJob?: BackfillJob; /** * Output only. The creation time of the object. */ readonly createTime?: Date; /** * Output only. The customization rules for the object. These rules are * derived from the parent Stream's `rule_sets` and represent the intended * configuration for the object. */ readonly customizationRules?: CustomizationRule[]; /** * Required. Display name. */ displayName?: string; /** * Output only. Active errors on the object. */ readonly errors?: Error[]; /** * Output only. Identifier. The object resource's name. */ readonly name?: string; /** * The object identifier in the data source. */ sourceObject?: SourceObjectIdentifier; /** * Output only. The last update time of the object. */ readonly updateTime?: Date; } /** * Time unit column partitioning. see * https://cloud.google.com/bigquery/docs/partitioned-tables#date_timestamp_partitioned_tables */ export interface TimeUnitPartition { /** * Required. The partitioning column. */ column?: string; /** * Optional. Partition granularity. */ partitioningTimeGranularity?: | "PARTITIONING_TIME_GRANULARITY_UNSPECIFIED" | "PARTITIONING_TIME_GRANULARITY_HOUR" | "PARTITIONING_TIME_GRANULARITY_DAY" | "PARTITIONING_TIME_GRANULARITY_MONTH" | "PARTITIONING_TIME_GRANULARITY_YEAR"; } /** * Username-password credentials. */ export interface UserCredentials { /** * Optional. Password for the Salesforce connection. Mutually exclusive with * the `secret_manager_stored_password` field. */ password?: string; /** * Optional. A reference to a Secret Manager resource name storing the * Salesforce connection's password. Mutually exclusive with the `password` * field. */ secretManagerStoredPassword?: string; /** * Optional. A reference to a Secret Manager resource name storing the * Salesforce connection's security token. Mutually exclusive with the * `security_token` field. */ secretManagerStoredSecurityToken?: string; /** * Optional. Security token for the Salesforce connection. 
Mutually exclusive * with the `secret_manager_stored_security_token` field. */ securityToken?: string; /** * Required. Username for the Salesforce connection. */ username?: string; } /** * A validation to perform on a stream. */ export interface Validation { /** * A custom code identifying this validation. */ code?: string; /** * A short description of the validation. */ description?: string; /** * Messages reflecting the validation results. */ message?: ValidationMessage[]; /** * Output only. Validation execution status. */ readonly state?: | "STATE_UNSPECIFIED" | "NOT_EXECUTED" | "FAILED" | "PASSED" | "WARNING"; } /** * Represents a user-facing validation result message. */ export interface ValidationMessage { /** * A custom code identifying this specific message. */ code?: string; /** * Message severity level (warning or error). */ level?: | "LEVEL_UNSPECIFIED" | "WARNING" | "ERROR"; /** * The result of the validation. */ message?: string; /** * Additional metadata related to the result. */ metadata?: { [key: string]: string }; } /** * Contains the current validation results. */ export interface ValidationResult { /** * A list of validations (includes both executed as well as not executed * validations). */ validations?: Validation[]; } /** * The VPC Peering configuration is used to create VPC peering between * Datastream and the consumer's VPC. */ export interface VpcPeeringConfig { /** * Required. A free subnet for peering. (CIDR of /29) */ subnet?: string; /** * Required. Fully qualified name of the VPC that Datastream will peer to. * Format: `projects/{project}/global/{networks}/{name}` */ vpc?: string; }