DataBrew - AWS API Codegen

API Documentation | Deno Module Docs | Original docs from AWS-JS-SDK

Customize Generated Module

Customization options: you can limit the module to specific operations, or include documentation comments, by adding query parameters to the import URL (for example `?docs=full`).

Example Import

import { DataBrew } from "https://aws-api.deno.dev/v0.4/services/databrew.ts?docs=full";


Generated Source (the actual code)

// Generation parameters:
//   aws-sdk-js definitions from v2.1323.0
//   AWS service UID: databrew-2017-07-25
//   code generation: v0.4
//   extra options:
//     "docs" = "full"
//   generated at: 2025-03-14

// Originally served at https://aws-api.deno.dev/v0.4/services/databrew.ts?docs=full

// Autogenerated API client for: AWS Glue DataBrew

import * as client from "https://deno.land/x/aws_api@v0.8.1/client/common.ts";
import * as cmnP from "https://deno.land/x/aws_api@v0.8.1/encoding/common.ts";
import * as jsonP from "https://deno.land/x/aws_api@v0.8.1/encoding/json.ts";

export class DataBrew {
  // Signed-request transport for this service, built once at construction time.
  #client: client.ServiceClient;
  // Builds a service client from the supplied factory using this service's metadata.
  constructor(apiFactory: client.ApiFactory) {
    this.#client = apiFactory.buildServiceClient(DataBrew.ApiMetadata);
  }

  /** Wire-protocol and signing metadata for AWS Glue DataBrew (rest-json, SigV4). */
  static ApiMetadata: client.ApiMetadata = {
    "apiVersion": "2017-07-25",
    "endpointPrefix": "databrew",
    "jsonVersion": "1.1",
    "protocol": "rest-json",
    "serviceFullName": "AWS Glue DataBrew",
    "serviceId": "DataBrew",
    "signatureVersion": "v4",
    "signingName": "databrew",
    "uid": "databrew-2017-07-25"
  };

  /**
   * Deletes one or more versions of a recipe at a time.
   *
   * The entire request will be rejected if:
   *
   *   - The recipe does not exist.
   *   - There is an invalid version identifier in the list of versions.
   *   - The version list is empty.
   *   - The version list size exceeds 50.
   *   - The version list contains duplicate entries.
   *
   * The request will complete successfully, but with partial failures, if:
   *
   *   - A version does not exist.
   *   - A version is being used by a job.
   *   - You specify `LATEST_WORKING`, but it's being used by a project.
   *   - The version fails to be deleted.
   *
   * The `LATEST_WORKING` version will only be deleted if the recipe has no other versions.
   * If you try to delete `LATEST_WORKING` while other versions exist (or if they can't be deleted), then `LATEST_WORKING` will be listed as partial failure in the response.
   */
  async batchDeleteRecipeVersion(
    params: BatchDeleteRecipeVersionRequest,
    opts: client.RequestOptions = {},
  ): Promise<BatchDeleteRecipeVersionResponse> {
    // Only the version list travels in the body; the recipe name is part of the path.
    const payload: jsonP.JSONObject = {
      RecipeVersions: params["RecipeVersions"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "BatchDeleteRecipeVersion",
      requestUri: cmnP.encodePath`/recipes/${params["Name"]}/batchDeleteRecipeVersion`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: { "Errors": [toRecipeVersionErrorDetail] },
    }, json);
  }

  /** Creates a new DataBrew dataset. */
  async createDataset(
    params: CreateDatasetRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateDatasetResponse> {
    // Serialize nested structures through the generated from* converters.
    const payload: jsonP.JSONObject = {
      Name: params["Name"],
      Format: params["Format"],
      FormatOptions: fromFormatOptions(params["FormatOptions"]),
      Input: fromInput(params["Input"]),
      PathOptions: fromPathOptions(params["PathOptions"]),
      Tags: params["Tags"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateDataset",
      requestUri: "/datasets",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Creates a new job to analyze a dataset and create its data profile. */
  async createProfileJob(
    params: CreateProfileJobRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateProfileJobResponse> {
    const payload: jsonP.JSONObject = {
      DatasetName: params["DatasetName"],
      EncryptionKeyArn: params["EncryptionKeyArn"],
      EncryptionMode: params["EncryptionMode"],
      Name: params["Name"],
      LogSubscription: params["LogSubscription"],
      MaxCapacity: params["MaxCapacity"],
      MaxRetries: params["MaxRetries"],
      OutputLocation: fromS3Location(params["OutputLocation"]),
      Configuration: fromProfileConfiguration(params["Configuration"]),
      ValidationConfigurations: params["ValidationConfigurations"]?.map(x => fromValidationConfiguration(x)),
      RoleArn: params["RoleArn"],
      Tags: params["Tags"],
      Timeout: params["Timeout"],
      JobSample: fromJobSample(params["JobSample"]),
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateProfileJob",
      requestUri: "/profileJobs",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Creates a new DataBrew project. */
  async createProject(
    params: CreateProjectRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateProjectResponse> {
    const payload: jsonP.JSONObject = {
      DatasetName: params["DatasetName"],
      Name: params["Name"],
      RecipeName: params["RecipeName"],
      Sample: fromSample(params["Sample"]),
      RoleArn: params["RoleArn"],
      Tags: params["Tags"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateProject",
      requestUri: "/projects",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Creates a new DataBrew recipe. */
  async createRecipe(
    params: CreateRecipeRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateRecipeResponse> {
    const payload: jsonP.JSONObject = {
      Description: params["Description"],
      Name: params["Name"],
      Steps: params["Steps"]?.map(x => fromRecipeStep(x)),
      Tags: params["Tags"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateRecipe",
      requestUri: "/recipes",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Creates a new job to transform input data, using steps defined in an existing Glue DataBrew recipe */
  async createRecipeJob(
    params: CreateRecipeJobRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateRecipeJobResponse> {
    const payload: jsonP.JSONObject = {
      DatasetName: params["DatasetName"],
      EncryptionKeyArn: params["EncryptionKeyArn"],
      EncryptionMode: params["EncryptionMode"],
      Name: params["Name"],
      LogSubscription: params["LogSubscription"],
      MaxCapacity: params["MaxCapacity"],
      MaxRetries: params["MaxRetries"],
      Outputs: params["Outputs"]?.map(x => fromOutput(x)),
      DataCatalogOutputs: params["DataCatalogOutputs"]?.map(x => fromDataCatalogOutput(x)),
      DatabaseOutputs: params["DatabaseOutputs"]?.map(x => fromDatabaseOutput(x)),
      ProjectName: params["ProjectName"],
      RecipeReference: fromRecipeReference(params["RecipeReference"]),
      RoleArn: params["RoleArn"],
      Tags: params["Tags"],
      Timeout: params["Timeout"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateRecipeJob",
      requestUri: "/recipeJobs",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Creates a new ruleset that can be used in a profile job to validate the data quality of a dataset. */
  async createRuleset(
    params: CreateRulesetRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateRulesetResponse> {
    const payload: jsonP.JSONObject = {
      Name: params["Name"],
      Description: params["Description"],
      TargetArn: params["TargetArn"],
      Rules: params["Rules"]?.map(x => fromRule(x)),
      Tags: params["Tags"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateRuleset",
      requestUri: "/rulesets",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /**
   * Creates a new schedule for one or more DataBrew jobs.
   * Jobs can be run at a specific date and time, or at regular intervals.
   */
  async createSchedule(
    params: CreateScheduleRequest,
    opts: client.RequestOptions = {},
  ): Promise<CreateScheduleResponse> {
    const payload: jsonP.JSONObject = {
      JobNames: params["JobNames"],
      CronExpression: params["CronExpression"],
      Tags: params["Tags"],
      Name: params["Name"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "CreateSchedule",
      requestUri: "/schedules",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Deletes a dataset from DataBrew. */
  async deleteDataset(
    params: DeleteDatasetRequest,
    opts: client.RequestOptions = {},
  ): Promise<DeleteDatasetResponse> {
    // No request body: the dataset name rides in the path.
    const response = await this.#client.performRequest({
      opts,
      action: "DeleteDataset",
      method: "DELETE",
      requestUri: cmnP.encodePath`/datasets/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Deletes the specified DataBrew job. */
  async deleteJob(
    params: DeleteJobRequest,
    opts: client.RequestOptions = {},
  ): Promise<DeleteJobResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DeleteJob",
      method: "DELETE",
      requestUri: cmnP.encodePath`/jobs/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Deletes an existing DataBrew project. */
  async deleteProject(
    params: DeleteProjectRequest,
    opts: client.RequestOptions = {},
  ): Promise<DeleteProjectResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DeleteProject",
      method: "DELETE",
      requestUri: cmnP.encodePath`/projects/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Deletes a single version of a DataBrew recipe. */
  async deleteRecipeVersion(
    params: DeleteRecipeVersionRequest,
    opts: client.RequestOptions = {},
  ): Promise<DeleteRecipeVersionResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DeleteRecipeVersion",
      method: "DELETE",
      requestUri: cmnP.encodePath`/recipes/${params["Name"]}/recipeVersion/${params["RecipeVersion"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: {
        "Name": "s",
        "RecipeVersion": "s",
      },
      optional: {},
    }, json);
  }

  /** Deletes a ruleset. */
  async deleteRuleset(
    params: DeleteRulesetRequest,
    opts: client.RequestOptions = {},
  ): Promise<DeleteRulesetResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DeleteRuleset",
      method: "DELETE",
      requestUri: cmnP.encodePath`/rulesets/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Deletes the specified DataBrew schedule. */
  async deleteSchedule(
    params: DeleteScheduleRequest,
    opts: client.RequestOptions = {},
  ): Promise<DeleteScheduleResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DeleteSchedule",
      method: "DELETE",
      requestUri: cmnP.encodePath`/schedules/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Returns the definition of a specific DataBrew dataset. */
  async describeDataset(
    params: DescribeDatasetRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeDatasetResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DescribeDataset",
      method: "GET",
      requestUri: cmnP.encodePath`/datasets/${params["Name"]}`,
    });
    const json = await response.json();
    // Decode the wire shape; enums and nested structures go through to* converters.
    return jsonP.readObj({
      required: {
        "Name": "s",
        "Input": toInput,
      },
      optional: {
        "CreatedBy": "s",
        "CreateDate": "d",
        "Format": (x: jsonP.JSONValue) => cmnP.readEnum<InputFormat>(x),
        "FormatOptions": toFormatOptions,
        "LastModifiedDate": "d",
        "LastModifiedBy": "s",
        "Source": (x: jsonP.JSONValue) => cmnP.readEnum<Source>(x),
        "PathOptions": toPathOptions,
        "Tags": x => jsonP.readMap(String, String, x),
        "ResourceArn": "s",
      },
    }, json);
  }

  /** Returns the definition of a specific DataBrew job. */
  async describeJob(
    params: DescribeJobRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeJobResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DescribeJob",
      method: "GET",
      requestUri: cmnP.encodePath`/jobs/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {
        "CreateDate": "d",
        "CreatedBy": "s",
        "DatasetName": "s",
        "EncryptionKeyArn": "s",
        "EncryptionMode": (x: jsonP.JSONValue) => cmnP.readEnum<EncryptionMode>(x),
        "Type": (x: jsonP.JSONValue) => cmnP.readEnum<JobType>(x),
        "LastModifiedBy": "s",
        "LastModifiedDate": "d",
        "LogSubscription": (x: jsonP.JSONValue) => cmnP.readEnum<LogSubscription>(x),
        "MaxCapacity": "n",
        "MaxRetries": "n",
        "Outputs": [toOutput],
        "DataCatalogOutputs": [toDataCatalogOutput],
        "DatabaseOutputs": [toDatabaseOutput],
        "ProjectName": "s",
        "ProfileConfiguration": toProfileConfiguration,
        "ValidationConfigurations": [toValidationConfiguration],
        "RecipeReference": toRecipeReference,
        "ResourceArn": "s",
        "RoleArn": "s",
        "Tags": x => jsonP.readMap(String, String, x),
        "Timeout": "n",
        "JobSample": toJobSample,
      },
    }, json);
  }

  /** Represents one run of a DataBrew job. */
  async describeJobRun(
    params: DescribeJobRunRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeJobRunResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DescribeJobRun",
      method: "GET",
      requestUri: cmnP.encodePath`/jobs/${params["Name"]}/jobRun/${params["RunId"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "JobName": "s" },
      optional: {
        "Attempt": "n",
        "CompletedOn": "d",
        "DatasetName": "s",
        "ErrorMessage": "s",
        "ExecutionTime": "n",
        "ProfileConfiguration": toProfileConfiguration,
        "ValidationConfigurations": [toValidationConfiguration],
        "RunId": "s",
        "State": (x: jsonP.JSONValue) => cmnP.readEnum<JobRunState>(x),
        "LogSubscription": (x: jsonP.JSONValue) => cmnP.readEnum<LogSubscription>(x),
        "LogGroupName": "s",
        "Outputs": [toOutput],
        "DataCatalogOutputs": [toDataCatalogOutput],
        "DatabaseOutputs": [toDatabaseOutput],
        "RecipeReference": toRecipeReference,
        "StartedBy": "s",
        "StartedOn": "d",
        "JobSample": toJobSample,
      },
    }, json);
  }

  /** Returns the definition of a specific DataBrew project. */
  async describeProject(
    params: DescribeProjectRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeProjectResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DescribeProject",
      method: "GET",
      requestUri: cmnP.encodePath`/projects/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {
        "CreateDate": "d",
        "CreatedBy": "s",
        "DatasetName": "s",
        "LastModifiedDate": "d",
        "LastModifiedBy": "s",
        "RecipeName": "s",
        "ResourceArn": "s",
        "Sample": toSample,
        "RoleArn": "s",
        "Tags": x => jsonP.readMap(String, String, x),
        "SessionStatus": (x: jsonP.JSONValue) => cmnP.readEnum<SessionStatus>(x),
        "OpenedBy": "s",
        "OpenDate": "d",
      },
    }, json);
  }

  /** Returns the definition of a specific DataBrew recipe corresponding to a particular version. */
  async describeRecipe(
    params: DescribeRecipeRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeRecipeResponse> {
    const query = new URLSearchParams();
    // Optional version selector; omitted entirely when not provided.
    if (params["RecipeVersion"] != null) query.set("recipeVersion", String(params["RecipeVersion"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "DescribeRecipe",
      method: "GET",
      requestUri: cmnP.encodePath`/recipes/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {
        "CreatedBy": "s",
        "CreateDate": "d",
        "LastModifiedBy": "s",
        "LastModifiedDate": "d",
        "ProjectName": "s",
        "PublishedBy": "s",
        "PublishedDate": "d",
        "Description": "s",
        "Steps": [toRecipeStep],
        "Tags": x => jsonP.readMap(String, String, x),
        "ResourceArn": "s",
        "RecipeVersion": "s",
      },
    }, json);
  }

  /** Retrieves detailed information about the ruleset. */
  async describeRuleset(
    params: DescribeRulesetRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeRulesetResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DescribeRuleset",
      method: "GET",
      requestUri: cmnP.encodePath`/rulesets/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {
        "Description": "s",
        "TargetArn": "s",
        "Rules": [toRule],
        "CreateDate": "d",
        "CreatedBy": "s",
        "LastModifiedBy": "s",
        "LastModifiedDate": "d",
        "ResourceArn": "s",
        "Tags": x => jsonP.readMap(String, String, x),
      },
    }, json);
  }

  /** Returns the definition of a specific DataBrew schedule. */
  async describeSchedule(
    params: DescribeScheduleRequest,
    opts: client.RequestOptions = {},
  ): Promise<DescribeScheduleResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "DescribeSchedule",
      method: "GET",
      requestUri: cmnP.encodePath`/schedules/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {
        "CreateDate": "d",
        "CreatedBy": "s",
        "JobNames": ["s"],
        "LastModifiedBy": "s",
        "LastModifiedDate": "d",
        "ResourceArn": "s",
        "CronExpression": "s",
        "Tags": x => jsonP.readMap(String, String, x),
      },
    }, json);
  }

  /** Lists all of the DataBrew datasets. */
  async listDatasets(
    params: ListDatasetsRequest = {},
    opts: client.RequestOptions = {},
  ): Promise<ListDatasetsResponse> {
    // Pagination controls travel as query-string parameters.
    const query = new URLSearchParams();
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListDatasets",
      method: "GET",
      requestUri: "/datasets",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Datasets": [toDataset] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists all of the previous runs of a particular DataBrew job. */
  async listJobRuns(
    params: ListJobRunsRequest,
    opts: client.RequestOptions = {},
  ): Promise<ListJobRunsResponse> {
    const query = new URLSearchParams();
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListJobRuns",
      method: "GET",
      requestUri: cmnP.encodePath`/jobs/${params["Name"]}/jobRuns`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "JobRuns": [toJobRun] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists all of the DataBrew jobs that are defined. */
  async listJobs(
    params: ListJobsRequest = {},
    opts: client.RequestOptions = {},
  ): Promise<ListJobsResponse> {
    const query = new URLSearchParams();
    if (params["DatasetName"] != null) query.set("datasetName", String(params["DatasetName"]));
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    if (params["ProjectName"] != null) query.set("projectName", String(params["ProjectName"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListJobs",
      method: "GET",
      requestUri: "/jobs",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Jobs": [toJob] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists all of the DataBrew projects that are defined. */
  async listProjects(
    params: ListProjectsRequest = {},
    opts: client.RequestOptions = {},
  ): Promise<ListProjectsResponse> {
    const query = new URLSearchParams();
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListProjects",
      method: "GET",
      requestUri: "/projects",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Projects": [toProject] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists the versions of a particular DataBrew recipe, except for `LATEST_WORKING`. */
  async listRecipeVersions(
    params: ListRecipeVersionsRequest,
    opts: client.RequestOptions = {},
  ): Promise<ListRecipeVersionsResponse> {
    const query = new URLSearchParams();
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    // "name" is a required query parameter on this route (not a path segment).
    query.set("name", params["Name"]?.toString() ?? "");
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListRecipeVersions",
      method: "GET",
      requestUri: "/recipeVersions",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Recipes": [toRecipe] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists all of the DataBrew recipes that are defined. */
  async listRecipes(
    params: ListRecipesRequest = {},
    opts: client.RequestOptions = {},
  ): Promise<ListRecipesResponse> {
    const query = new URLSearchParams();
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    if (params["RecipeVersion"] != null) query.set("recipeVersion", String(params["RecipeVersion"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListRecipes",
      method: "GET",
      requestUri: "/recipes",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Recipes": [toRecipe] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** List all rulesets available in the current account or rulesets associated with a specific resource (dataset). */
  async listRulesets(
    params: ListRulesetsRequest = {},
    opts: client.RequestOptions = {},
  ): Promise<ListRulesetsResponse> {
    const query = new URLSearchParams();
    if (params["TargetArn"] != null) query.set("targetArn", String(params["TargetArn"]));
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListRulesets",
      method: "GET",
      requestUri: "/rulesets",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Rulesets": [toRulesetItem] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists the DataBrew schedules that are defined. */
  async listSchedules(
    params: ListSchedulesRequest = {},
    opts: client.RequestOptions = {},
  ): Promise<ListSchedulesResponse> {
    const query = new URLSearchParams();
    if (params["JobName"] != null) query.set("jobName", String(params["JobName"]));
    if (params["MaxResults"] != null) query.set("maxResults", String(params["MaxResults"]));
    if (params["NextToken"] != null) query.set("nextToken", String(params["NextToken"]));
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "ListSchedules",
      method: "GET",
      requestUri: "/schedules",
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Schedules": [toSchedule] },
      optional: { "NextToken": "s" },
    }, json);
  }

  /** Lists all the tags for a DataBrew resource. */
  async listTagsForResource(
    params: ListTagsForResourceRequest,
    opts: client.RequestOptions = {},
  ): Promise<ListTagsForResourceResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "ListTagsForResource",
      method: "GET",
      requestUri: cmnP.encodePath`/tags/${params["ResourceArn"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: {},
      optional: { "Tags": x => jsonP.readMap(String, String, x) },
    }, json);
  }

  /** Publishes a new version of a DataBrew recipe. */
  async publishRecipe(
    params: PublishRecipeRequest,
    opts: client.RequestOptions = {},
  ): Promise<PublishRecipeResponse> {
    const payload: jsonP.JSONObject = {
      Description: params["Description"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "PublishRecipe",
      requestUri: cmnP.encodePath`/recipes/${params["Name"]}/publishRecipe`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Performs a recipe step within an interactive DataBrew session that's currently open. */
  async sendProjectSessionAction(
    params: SendProjectSessionActionRequest,
    opts: client.RequestOptions = {},
  ): Promise<SendProjectSessionActionResponse> {
    const payload: jsonP.JSONObject = {
      Preview: params["Preview"],
      RecipeStep: fromRecipeStep(params["RecipeStep"]),
      StepIndex: params["StepIndex"],
      ClientSessionId: params["ClientSessionId"],
      ViewFrame: fromViewFrame(params["ViewFrame"]),
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "SendProjectSessionAction",
      method: "PUT",
      requestUri: cmnP.encodePath`/projects/${params["Name"]}/sendProjectSessionAction`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {
        "Result": "s",
        "ActionId": "n",
      },
    }, json);
  }

  /** Runs a DataBrew job. */
  async startJobRun(
    params: StartJobRunRequest,
    opts: client.RequestOptions = {},
  ): Promise<StartJobRunResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "StartJobRun",
      requestUri: cmnP.encodePath`/jobs/${params["Name"]}/startJobRun`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "RunId": "s" },
      optional: {},
    }, json);
  }

  /** Creates an interactive session, enabling you to manipulate data in a DataBrew project. */
  async startProjectSession(
    params: StartProjectSessionRequest,
    opts: client.RequestOptions = {},
  ): Promise<StartProjectSessionResponse> {
    const payload: jsonP.JSONObject = {
      AssumeControl: params["AssumeControl"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "StartProjectSession",
      method: "PUT",
      requestUri: cmnP.encodePath`/projects/${params["Name"]}/startProjectSession`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: { "ClientSessionId": "s" },
    }, json);
  }

  /** Stops a particular run of a job. */
  async stopJobRun(
    params: StopJobRunRequest,
    opts: client.RequestOptions = {},
  ): Promise<StopJobRunResponse> {
    const response = await this.#client.performRequest({
      opts,
      action: "StopJobRun",
      requestUri: cmnP.encodePath`/jobs/${params["Name"]}/jobRun/${params["RunId"]}/stopJobRun`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "RunId": "s" },
      optional: {},
    }, json);
  }

  /** Adds metadata tags to a DataBrew resource, such as a dataset, project, recipe, job, or schedule. */
  async tagResource(
    params: TagResourceRequest,
    opts: client.RequestOptions = {},
  ): Promise<void> {
    const payload: jsonP.JSONObject = {
      Tags: params["Tags"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "TagResource",
      requestUri: cmnP.encodePath`/tags/${params["ResourceArn"]}`,
    });
    // No meaningful response body; cancel it so the connection can be reused.
    await response.body?.cancel();
  }

  /** Removes metadata tags from a DataBrew resource. */
  async untagResource(
    params: UntagResourceRequest,
    opts: client.RequestOptions = {},
  ): Promise<void> {
    // Each tag key becomes a repeated "tagKeys" query parameter.
    const query = new URLSearchParams();
    for (const tagKey of params["TagKeys"]) {
      query.append("tagKeys", tagKey?.toString() ?? "");
    }
    const response = await this.#client.performRequest({
      opts,
      query,
      action: "UntagResource",
      method: "DELETE",
      requestUri: cmnP.encodePath`/tags/${params["ResourceArn"]}`,
    });
    // No meaningful response body; cancel it so the connection can be reused.
    await response.body?.cancel();
  }

  /** Modifies the definition of an existing DataBrew dataset. */
  async updateDataset(
    params: UpdateDatasetRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateDatasetResponse> {
    const payload: jsonP.JSONObject = {
      Format: params["Format"],
      FormatOptions: fromFormatOptions(params["FormatOptions"]),
      Input: fromInput(params["Input"]),
      PathOptions: fromPathOptions(params["PathOptions"]),
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "UpdateDataset",
      method: "PUT",
      requestUri: cmnP.encodePath`/datasets/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /** Modifies the definition of an existing profile job. */
  async updateProfileJob(
    params: UpdateProfileJobRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateProfileJobResponse> {
    const payload: jsonP.JSONObject = {
      Configuration: fromProfileConfiguration(params["Configuration"]),
      EncryptionKeyArn: params["EncryptionKeyArn"],
      EncryptionMode: params["EncryptionMode"],
      LogSubscription: params["LogSubscription"],
      MaxCapacity: params["MaxCapacity"],
      MaxRetries: params["MaxRetries"],
      OutputLocation: fromS3Location(params["OutputLocation"]),
      ValidationConfigurations: params["ValidationConfigurations"]?.map(x => fromValidationConfiguration(x)),
      RoleArn: params["RoleArn"],
      Timeout: params["Timeout"],
      JobSample: fromJobSample(params["JobSample"]),
    };
    const response = await this.#client.performRequest({
      opts,
      body: payload,
      action: "UpdateProfileJob",
      method: "PUT",
      requestUri: cmnP.encodePath`/profileJobs/${params["Name"]}`,
    });
    const json = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, json);
  }

  /**
   * Modifies the definition of an existing DataBrew project.
   * Resolves to the project name, plus the last-modified timestamp when present.
   */
  async updateProject(
    params: UpdateProjectRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateProjectResponse> {
    const requestBody: jsonP.JSONObject = {
      Sample: fromSample(params["Sample"]),
      RoleArn: params["RoleArn"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: requestBody,
      action: "UpdateProject",
      method: "PUT",
      requestUri: cmnP.encodePath`/projects/${params["Name"]}`,
    });
    const payload = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: { "LastModifiedDate": "d" },
    }, payload);
  }

  /**
   * Modifies the definition of the `LATEST_WORKING` version of a DataBrew recipe.
   * Resolves to the name of the recipe that was updated.
   */
  async updateRecipe(
    params: UpdateRecipeRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateRecipeResponse> {
    const requestBody: jsonP.JSONObject = {
      Description: params["Description"],
      Steps: params["Steps"]?.map(x => fromRecipeStep(x)),
    };
    const response = await this.#client.performRequest({
      opts,
      body: requestBody,
      action: "UpdateRecipe",
      method: "PUT",
      requestUri: cmnP.encodePath`/recipes/${params["Name"]}`,
    });
    const payload = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, payload);
  }

  /**
   * Modifies the definition of an existing DataBrew recipe job.
   * Resolves to the name of the job that was updated.
   */
  async updateRecipeJob(
    params: UpdateRecipeJobRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateRecipeJobResponse> {
    // Serialize nested structures with the generated from* converters.
    const requestBody: jsonP.JSONObject = {
      EncryptionKeyArn: params["EncryptionKeyArn"],
      EncryptionMode: params["EncryptionMode"],
      LogSubscription: params["LogSubscription"],
      MaxCapacity: params["MaxCapacity"],
      MaxRetries: params["MaxRetries"],
      Outputs: params["Outputs"]?.map(x => fromOutput(x)),
      DataCatalogOutputs: params["DataCatalogOutputs"]?.map(x => fromDataCatalogOutput(x)),
      DatabaseOutputs: params["DatabaseOutputs"]?.map(x => fromDatabaseOutput(x)),
      RoleArn: params["RoleArn"],
      Timeout: params["Timeout"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: requestBody,
      action: "UpdateRecipeJob",
      method: "PUT",
      requestUri: cmnP.encodePath`/recipeJobs/${params["Name"]}`,
    });
    const payload = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, payload);
  }

  /**
   * Updates specified ruleset.
   * Resolves to the name of the ruleset that was updated.
   */
  async updateRuleset(
    params: UpdateRulesetRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateRulesetResponse> {
    const requestBody: jsonP.JSONObject = {
      Description: params["Description"],
      Rules: params["Rules"]?.map(x => fromRule(x)),
    };
    const response = await this.#client.performRequest({
      opts,
      body: requestBody,
      action: "UpdateRuleset",
      method: "PUT",
      requestUri: cmnP.encodePath`/rulesets/${params["Name"]}`,
    });
    const payload = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, payload);
  }

  /**
   * Modifies the definition of an existing DataBrew schedule.
   * Resolves to the name of the schedule that was updated.
   */
  async updateSchedule(
    params: UpdateScheduleRequest,
    opts: client.RequestOptions = {},
  ): Promise<UpdateScheduleResponse> {
    const requestBody: jsonP.JSONObject = {
      JobNames: params["JobNames"],
      CronExpression: params["CronExpression"],
    };
    const response = await this.#client.performRequest({
      opts,
      body: requestBody,
      action: "UpdateSchedule",
      method: "PUT",
      requestUri: cmnP.encodePath`/schedules/${params["Name"]}`,
    });
    const payload = await response.json();
    return jsonP.readObj({
      required: { "Name": "s" },
      optional: {},
    }, payload);
  }

}

// refs: 1 - tags: named, input
/** Input for the DataBrew `batchDeleteRecipeVersion` operation. */
export interface BatchDeleteRecipeVersionRequest {
  /** The name of the recipe whose versions are to be deleted. */
  Name: string;
  /**
   * An array of version identifiers, for the recipe versions to be deleted.
   * You can specify numeric versions (`X.Y`) or `LATEST_WORKING`.
   * `LATEST_PUBLISHED` is not supported.
   */
  RecipeVersions: string[];
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createDataset` operation. */
export interface CreateDatasetRequest {
  /**
   * The name of the dataset to be created.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
  /** The file format of a dataset that is created from an Amazon S3 file or folder. */
  Format?: InputFormat | null;
  // NOTE(review): undocumented upstream; presumably format-specific parsing options — confirm against the FormatOptions type.
  FormatOptions?: FormatOptions | null;
  // NOTE(review): undocumented upstream; presumably the location/definition of the dataset's source data — confirm against the Input type.
  Input: Input;
  /** A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset. */
  PathOptions?: PathOptions | null;
  /** Metadata tags to apply to this dataset. */
  Tags?: { [key: string]: string | null | undefined } | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createProfileJob` operation. */
export interface CreateProfileJobRequest {
  /** The name of the dataset that this job is to act upon. */
  DatasetName: string;
  /** The Amazon Resource Name (ARN) of an encryption key that is used to protect the job. */
  EncryptionKeyArn?: string | null;
  /**
   * The encryption mode for the job, which can be one of the following:
   *
   *   - `SSE-KMS` - Server-side encryption with KMS-managed keys.
   *   - `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
   */
  EncryptionMode?: EncryptionMode | null;
  /**
   * The name of the job to be created.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
  /**
   * Enables or disables Amazon CloudWatch logging for the job.
   * If logging is enabled, CloudWatch writes one log stream for each job run.
   */
  LogSubscription?: LogSubscription | null;
  /** The maximum number of nodes that DataBrew can use when the job processes data. */
  MaxCapacity?: number | null;
  /** The maximum number of times to retry the job after a job run fails. */
  MaxRetries?: number | null;
  // NOTE(review): undocumented upstream; presumably the S3 location where profile results are written — confirm against the S3Location type.
  OutputLocation: S3Location;
  /**
   * Configuration for profile jobs.
   * Used to select columns, do evaluations, and override default parameters of evaluations.
   * When configuration is null, the profile job will run with default settings.
   */
  Configuration?: ProfileConfiguration | null;
  /** List of validation configurations that are applied to the profile job. */
  ValidationConfigurations?: ValidationConfiguration[] | null;
  /** The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job. */
  RoleArn: string;
  /** Metadata tags to apply to this job. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * The job's timeout in minutes.
   * A job that attempts to run longer than this timeout period ends with a status of `TIMEOUT`.
   */
  Timeout?: number | null;
  /**
   * Sample configuration for profile jobs only.
   * Determines the number of rows on which the profile job will be executed.
   * If a JobSample value is not provided, the default value will be used.
   * The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.
   */
  JobSample?: JobSample | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createProject` operation. */
export interface CreateProjectRequest {
  /** The name of an existing dataset to associate this project with. */
  DatasetName: string;
  /**
   * A unique name for the new project.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
  /** The name of an existing recipe to associate with the project. */
  RecipeName: string;
  // NOTE(review): undocumented upstream; presumably sampling settings for the project's interactive session — confirm against the Sample type.
  Sample?: Sample | null;
  /** The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be assumed for this request. */
  RoleArn: string;
  /** Metadata tags to apply to this project. */
  Tags?: { [key: string]: string | null | undefined } | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createRecipe` operation. */
export interface CreateRecipeRequest {
  /** A description for the recipe. */
  Description?: string | null;
  /**
   * A unique name for the recipe.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
  /**
   * An array containing the steps to be performed by the recipe.
   * Each recipe step consists of one recipe action and (optionally) an array of condition expressions.
   */
  Steps: RecipeStep[];
  /** Metadata tags to apply to this recipe. */
  Tags?: { [key: string]: string | null | undefined } | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createRecipeJob` operation. */
export interface CreateRecipeJobRequest {
  /** The name of the dataset that this job processes. */
  DatasetName?: string | null;
  /** The Amazon Resource Name (ARN) of an encryption key that is used to protect the job. */
  EncryptionKeyArn?: string | null;
  /**
   * The encryption mode for the job, which can be one of the following:
   *
   *   - `SSE-KMS` - Server-side encryption with keys managed by KMS.
   *   - `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
   */
  EncryptionMode?: EncryptionMode | null;
  /**
   * A unique name for the job.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
  /**
   * Enables or disables Amazon CloudWatch logging for the job.
   * If logging is enabled, CloudWatch writes one log stream for each job run.
   */
  LogSubscription?: LogSubscription | null;
  /** The maximum number of nodes that DataBrew can consume when the job processes data. */
  MaxCapacity?: number | null;
  /** The maximum number of times to retry the job after a job run fails. */
  MaxRetries?: number | null;
  /** One or more artifacts that represent the output from running the job. */
  Outputs?: Output[] | null;
  /** One or more artifacts that represent the Glue Data Catalog output from running the job. */
  DataCatalogOutputs?: DataCatalogOutput[] | null;
  /** Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write to. */
  DatabaseOutputs?: DatabaseOutput[] | null;
  /** Either the name of an existing project, or a combination of a recipe and a dataset to associate with the recipe. */
  ProjectName?: string | null;
  // NOTE(review): undocumented upstream; presumably identifies the recipe (and version) the job applies — confirm against the RecipeReference type.
  RecipeReference?: RecipeReference | null;
  /** The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job. */
  RoleArn: string;
  /** Metadata tags to apply to this job. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * The job's timeout in minutes.
   * A job that attempts to run longer than this timeout period ends with a status of `TIMEOUT`.
   */
  Timeout?: number | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createRuleset` operation. */
export interface CreateRulesetRequest {
  /**
   * The name of the ruleset to be created.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
  /** The description of the ruleset. */
  Description?: string | null;
  /** The Amazon Resource Name (ARN) of a resource (dataset) that the ruleset is associated with. */
  TargetArn: string;
  /**
   * A list of rules that are defined with the ruleset.
   * A rule includes one or more checks to be validated on a DataBrew dataset.
   */
  Rules: Rule[];
  /** Metadata tags to apply to the ruleset. */
  Tags?: { [key: string]: string | null | undefined } | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `createSchedule` operation. */
export interface CreateScheduleRequest {
  /** The name or names of one or more jobs to be run. */
  JobNames?: string[] | null;
  /**
   * The date or dates and time or times when the jobs are to be run.
   * For more information, see [Cron expressions](https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html) in the _Glue DataBrew Developer Guide_.
   */
  CronExpression: string;
  /** Metadata tags to apply to this schedule. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * A unique name for the schedule.
   * Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
   */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `deleteDataset` operation. */
export interface DeleteDatasetRequest {
  /** The name of the dataset to be deleted. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `deleteJob` operation. */
export interface DeleteJobRequest {
  /** The name of the job to be deleted. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `deleteProject` operation. */
export interface DeleteProjectRequest {
  /** The name of the project to be deleted. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `deleteRecipeVersion` operation. */
export interface DeleteRecipeVersionRequest {
  /** The name of the recipe. */
  Name: string;
  /**
   * The version of the recipe to be deleted.
   * You can specify a numeric version (`X.Y`) or `LATEST_WORKING`.
   * `LATEST_PUBLISHED` is not supported.
   */
  RecipeVersion: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `deleteRuleset` operation. */
export interface DeleteRulesetRequest {
  /** The name of the ruleset to be deleted. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `deleteSchedule` operation. */
export interface DeleteScheduleRequest {
  /** The name of the schedule to be deleted. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeDataset` operation. */
export interface DescribeDatasetRequest {
  /** The name of the dataset to be described. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeJob` operation. */
export interface DescribeJobRequest {
  /** The name of the job to be described. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeJobRun` operation. */
export interface DescribeJobRunRequest {
  /** The name of the job being processed during this run. */
  Name: string;
  /** The unique identifier of the job run. */
  RunId: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeProject` operation. */
export interface DescribeProjectRequest {
  /** The name of the project to be described. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeRecipe` operation. */
export interface DescribeRecipeRequest {
  /** The name of the recipe to be described. */
  Name: string;
  /**
   * The recipe version identifier.
   * If this parameter isn't specified, then the latest published version is returned.
   */
  RecipeVersion?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeRuleset` operation. */
export interface DescribeRulesetRequest {
  /** The name of the ruleset to be described. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `describeSchedule` operation. */
export interface DescribeScheduleRequest {
  /** The name of the schedule to be described. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listDatasets` operation. */
export interface ListDatasetsRequest {
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /** The token returned by a previous call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listJobRuns` operation. */
export interface ListJobRunsRequest {
  /** The name of the job. */
  Name: string;
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /** The token returned by a previous call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listJobs` operation. */
export interface ListJobsRequest {
  /**
   * The name of a dataset.
   * Using this parameter indicates to return only those jobs that act on the specified dataset.
   */
  DatasetName?: string | null;
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /**
   * A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated.
   * To get the next set of pages, pass in the NextToken value from the response object of the previous page call.
   */
  NextToken?: string | null;
  /**
   * The name of a project.
   * Using this parameter indicates to return only those jobs that are associated with the specified project.
   */
  ProjectName?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listProjects` operation. */
export interface ListProjectsRequest {
  /** The token returned by a previous call to retrieve the next set of results. */
  NextToken?: string | null;
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listRecipeVersions` operation. */
export interface ListRecipeVersionsRequest {
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /** The token returned by a previous call to retrieve the next set of results. */
  NextToken?: string | null;
  /** The name of the recipe for which to return version information. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listRecipes` operation. */
export interface ListRecipesRequest {
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /** The token returned by a previous call to retrieve the next set of results. */
  NextToken?: string | null;
  /**
   * Return only those recipes with a version identifier of `LATEST_WORKING` or `LATEST_PUBLISHED`.
   * If `RecipeVersion` is omitted, `ListRecipes` returns all of the `LATEST_PUBLISHED` recipe versions.
   *
   * Valid values: `LATEST_WORKING` | `LATEST_PUBLISHED`
   */
  RecipeVersion?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listRulesets` operation. */
export interface ListRulesetsRequest {
  /**
   * The Amazon Resource Name (ARN) of a resource (dataset).
   * Using this parameter indicates to return only those rulesets that are associated with the specified resource.
   */
  TargetArn?: string | null;
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /**
   * A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated.
   * To get the next set of pages, pass in the NextToken value from the response object of the previous page call.
   */
  NextToken?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listSchedules` operation. */
export interface ListSchedulesRequest {
  /** The name of the job that these schedules apply to. */
  JobName?: string | null;
  /** The maximum number of results to return in this request. */
  MaxResults?: number | null;
  /** The token returned by a previous call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `listTagsForResource` operation. */
export interface ListTagsForResourceRequest {
  /** The Amazon Resource Name (ARN) string that uniquely identifies the DataBrew resource. */
  ResourceArn: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `publishRecipe` operation. */
export interface PublishRecipeRequest {
  /** A description of the recipe to be published, for this version of the recipe. */
  Description?: string | null;
  /** The name of the recipe to be published. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `sendProjectSessionAction` operation. */
export interface SendProjectSessionActionRequest {
  /** If true, the result of the recipe step will be returned, but not applied. */
  Preview?: boolean | null;
  /** The name of the project to apply the action to. */
  Name: string;
  // NOTE(review): undocumented upstream; presumably the single recipe step to apply or preview — confirm against the RecipeStep type.
  RecipeStep?: RecipeStep | null;
  /**
   * The index from which to preview a step.
   * This index is used to preview the result of steps that have already been applied, so that the resulting view frame is from earlier in the view frame stack.
   */
  StepIndex?: number | null;
  /**
   * A unique identifier for an interactive session that's currently open and ready for work.
   * The action will be performed on this session.
   */
  ClientSessionId?: string | null;
  // NOTE(review): undocumented upstream; presumably the viewport over the data to return — confirm against the ViewFrame type.
  ViewFrame?: ViewFrame | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `startJobRun` operation. */
export interface StartJobRunRequest {
  /** The name of the job to be run. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `startProjectSession` operation. */
export interface StartProjectSessionRequest {
  /** The name of the project to act upon. */
  Name: string;
  /** A value that, if true, enables you to take control of a session, even if a different client is currently accessing the project. */
  AssumeControl?: boolean | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `stopJobRun` operation. */
export interface StopJobRunRequest {
  /** The name of the job to be stopped. */
  Name: string;
  /** The ID of the job run to be stopped. */
  RunId: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `tagResource` operation. */
export interface TagResourceRequest {
  /**
   * The DataBrew resource to which tags should be added.
   * The value for this parameter is an Amazon Resource Name (ARN).
   * For DataBrew, you can tag a dataset, a job, a project, or a recipe.
   */
  ResourceArn: string;
  /** One or more tags to be assigned to the resource. */
  Tags: { [key: string]: string | null | undefined };
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `untagResource` operation. */
export interface UntagResourceRequest {
  /**
   * A DataBrew resource from which you want to remove a tag or tags.
   * The value for this parameter is an Amazon Resource Name (ARN).
   */
  ResourceArn: string;
  /** The tag keys (names) of one or more tags to be removed. */
  TagKeys: string[];
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateDataset` operation. */
export interface UpdateDatasetRequest {
  /** The name of the dataset to be updated. */
  Name: string;
  /** The file format of a dataset that is created from an Amazon S3 file or folder. */
  Format?: InputFormat | null;
  // NOTE(review): undocumented upstream; presumably format-specific parsing options — confirm against the FormatOptions type.
  FormatOptions?: FormatOptions | null;
  // NOTE(review): undocumented upstream; presumably the location/definition of the dataset's source data — confirm against the Input type.
  Input: Input;
  /** A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset. */
  PathOptions?: PathOptions | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateProfileJob` operation. */
export interface UpdateProfileJobRequest {
  /**
   * Configuration for profile jobs.
   * Used to select columns, do evaluations, and override default parameters of evaluations.
   * When configuration is null, the profile job will run with default settings.
   */
  Configuration?: ProfileConfiguration | null;
  /** The Amazon Resource Name (ARN) of an encryption key that is used to protect the job. */
  EncryptionKeyArn?: string | null;
  /**
   * The encryption mode for the job, which can be one of the following:
   *
   *   - `SSE-KMS` - Server-side encryption with keys managed by KMS.
   *   - `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
   */
  EncryptionMode?: EncryptionMode | null;
  /** The name of the job to be updated. */
  Name: string;
  /**
   * Enables or disables Amazon CloudWatch logging for the job.
   * If logging is enabled, CloudWatch writes one log stream for each job run.
   */
  LogSubscription?: LogSubscription | null;
  /** The maximum number of compute nodes that DataBrew can use when the job processes data. */
  MaxCapacity?: number | null;
  /** The maximum number of times to retry the job after a job run fails. */
  MaxRetries?: number | null;
  // NOTE(review): undocumented upstream; presumably the S3 location where profile results are written — confirm against the S3Location type.
  OutputLocation: S3Location;
  /** List of validation configurations that are applied to the profile job. */
  ValidationConfigurations?: ValidationConfiguration[] | null;
  /** The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job. */
  RoleArn: string;
  /**
   * The job's timeout in minutes.
   * A job that attempts to run longer than this timeout period ends with a status of `TIMEOUT`.
   */
  Timeout?: number | null;
  /**
   * Sample configuration for Profile Jobs only.
   * Determines the number of rows on which the Profile job will be executed.
   * If a JobSample value is not provided for profile jobs, the default value will be used.
   * The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.
   */
  JobSample?: JobSample | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateProject` operation. */
export interface UpdateProjectRequest {
  // NOTE(review): undocumented upstream; presumably sampling settings for the project's interactive session — confirm against the Sample type.
  Sample?: Sample | null;
  /** The Amazon Resource Name (ARN) of the IAM role to be assumed for this request. */
  RoleArn: string;
  /** The name of the project to be updated. */
  Name: string;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateRecipe` operation. */
export interface UpdateRecipeRequest {
  /** A description of the recipe. */
  Description?: string | null;
  /** The name of the recipe to be updated. */
  Name: string;
  /**
   * One or more steps to be performed by the recipe.
   * Each step consists of an action, and the conditions under which the action should succeed.
   */
  Steps?: RecipeStep[] | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateRecipeJob` operation. */
export interface UpdateRecipeJobRequest {
  /** The Amazon Resource Name (ARN) of an encryption key that is used to protect the job. */
  EncryptionKeyArn?: string | null;
  /**
   * The encryption mode for the job, which can be one of the following:
   *
   *   - `SSE-KMS` - Server-side encryption with keys managed by KMS.
   *   - `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
   */
  EncryptionMode?: EncryptionMode | null;
  /** The name of the job to update. */
  Name: string;
  /**
   * Enables or disables Amazon CloudWatch logging for the job.
   * If logging is enabled, CloudWatch writes one log stream for each job run.
   */
  LogSubscription?: LogSubscription | null;
  /** The maximum number of nodes that DataBrew can consume when the job processes data. */
  MaxCapacity?: number | null;
  /** The maximum number of times to retry the job after a job run fails. */
  MaxRetries?: number | null;
  /** One or more artifacts that represent the output from running the job. */
  Outputs?: Output[] | null;
  /** One or more artifacts that represent the Glue Data Catalog output from running the job. */
  DataCatalogOutputs?: DataCatalogOutput[] | null;
  /** Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into. */
  DatabaseOutputs?: DatabaseOutput[] | null;
  /** The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job. */
  RoleArn: string;
  /**
   * The job's timeout in minutes.
   * A job that attempts to run longer than this timeout period ends with a status of `TIMEOUT`.
   */
  Timeout?: number | null;
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateRuleset` operation. */
export interface UpdateRulesetRequest {
  /** The name of the ruleset to be updated. */
  Name: string;
  /** The description of the ruleset. */
  Description?: string | null;
  /**
   * A list of rules that are defined with the ruleset.
   * A rule includes one or more checks to be validated on a DataBrew dataset.
   */
  Rules: Rule[];
}

// refs: 1 - tags: named, input
/** Input for the DataBrew `updateSchedule` operation. */
export interface UpdateScheduleRequest {
  /** The name or names of one or more jobs to be run for this schedule. */
  JobNames?: string[] | null;
  /**
   * The date or dates and time or times when the jobs are to be run.
   * For more information, see [Cron expressions](https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html) in the _Glue DataBrew Developer Guide_.
   */
  CronExpression: string;
  /** The name of the schedule to update. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `batchDeleteRecipeVersion` operation. */
export interface BatchDeleteRecipeVersionResponse {
  /** The name of the recipe that was modified. */
  Name: string;
  /** Errors, if any, that occurred while attempting to delete the recipe versions. */
  Errors?: RecipeVersionErrorDetail[] | null;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createDataset` operation. */
export interface CreateDatasetResponse {
  /** The name of the dataset that you created. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createProfileJob` operation. */
export interface CreateProfileJobResponse {
  /** The name of the job that was created. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createProject` operation. */
export interface CreateProjectResponse {
  /** The name of the project that you created. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createRecipe` operation. */
export interface CreateRecipeResponse {
  /** The name of the recipe that you created. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createRecipeJob` operation. */
export interface CreateRecipeJobResponse {
  /** The name of the job that you created. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createRuleset` operation. */
export interface CreateRulesetResponse {
  /** The unique name of the created ruleset. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `createSchedule` operation. */
export interface CreateScheduleResponse {
  /** The name of the schedule that was created. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `deleteDataset` operation. */
export interface DeleteDatasetResponse {
  /** The name of the dataset that you deleted. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `deleteJob` operation. */
export interface DeleteJobResponse {
  /** The name of the job that you deleted. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `deleteProject` operation. */
export interface DeleteProjectResponse {
  /** The name of the project that you deleted. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `deleteRecipeVersion` operation. */
export interface DeleteRecipeVersionResponse {
  /** The name of the recipe that was deleted. */
  Name: string;
  /** The version of the recipe that was deleted. */
  RecipeVersion: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `deleteRuleset` operation. */
export interface DeleteRulesetResponse {
  /** The name of the deleted ruleset. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `deleteSchedule` operation. */
export interface DeleteScheduleResponse {
  /** The name of the schedule that was deleted. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output of the DataBrew `describeDataset` operation. */
export interface DescribeDatasetResponse {
  /** The identifier (user name) of the user who created the dataset. */
  CreatedBy?: string | null;
  /** The date and time that the dataset was created. */
  CreateDate?: Date | number | null;
  /** The name of the dataset. */
  Name: string;
  /** The file format of a dataset that is created from an Amazon S3 file or folder. */
  Format?: InputFormat | null;
  // NOTE(review): undocumented upstream; presumably format-specific parsing options — confirm against the FormatOptions type.
  FormatOptions?: FormatOptions | null;
  // NOTE(review): undocumented upstream; presumably the location/definition of the dataset's source data — confirm against the Input type.
  Input: Input;
  /** The date and time that the dataset was last modified. */
  LastModifiedDate?: Date | number | null;
  /** The identifier (user name) of the user who last modified the dataset. */
  LastModifiedBy?: string | null;
  /** The location of the data for this dataset, Amazon S3 or the Glue Data Catalog. */
  Source?: Source | null;
  /** A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset. */
  PathOptions?: PathOptions | null;
  /** Metadata tags associated with this dataset. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The Amazon Resource Name (ARN) of the dataset. */
  ResourceArn?: string | null;
}

// refs: 1 - tags: named, output
/** Detailed metadata for a single DataBrew job definition. */
export interface DescribeJobResponse {
  /** The date and time that the job was created. */
  CreateDate?: Date | number | null;
  /** The identifier (user name) of the user associated with the creation of the job. */
  CreatedBy?: string | null;
  /** The dataset that the job acts upon. */
  DatasetName?: string | null;
  /** The Amazon Resource Name (ARN) of an encryption key that is used to protect the job. */
  EncryptionKeyArn?: string | null;
  /**
   * The encryption mode for the job, which can be one of the following:
   *
   *   - `SSE-KMS` - Server-side encryption with keys managed by KMS.
   *   - `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
   */
  EncryptionMode?: EncryptionMode | null;
  /** The name of the job. */
  Name: string;
  /**
   * The job type, which must be one of the following:
   *
   *   - `PROFILE` - The job analyzes the dataset to determine its size, data types, data distribution, and more.
   *   - `RECIPE` - The job applies one or more transformations to a dataset.
   */
  Type?: JobType | null;
  /** The identifier (user name) of the user who last modified the job. */
  LastModifiedBy?: string | null;
  /** The date and time that the job was last modified. */
  LastModifiedDate?: Date | number | null;
  /** Indicates whether Amazon CloudWatch logging is enabled for this job. */
  LogSubscription?: LogSubscription | null;
  /** The maximum number of compute nodes that DataBrew can consume when the job processes data. */
  MaxCapacity?: number | null;
  /** The maximum number of times to retry the job after a job run fails. */
  MaxRetries?: number | null;
  /** One or more artifacts that represent the output from running the job. */
  Outputs?: Output[] | null;
  /** One or more artifacts that represent the Glue Data Catalog output from running the job. */
  DataCatalogOutputs?: DataCatalogOutput[] | null;
  /** Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into. */
  DatabaseOutputs?: DatabaseOutput[] | null;
  /** The DataBrew project associated with this job. */
  ProjectName?: string | null;
  /**
   * Configuration for profile jobs.
   * Used to select columns, do evaluations, and override default parameters of evaluations.
   * When configuration is null, the profile job will run with default settings.
   */
  ProfileConfiguration?: ProfileConfiguration | null;
  /** List of validation configurations that are applied to the profile job. */
  ValidationConfigurations?: ValidationConfiguration[] | null;
  /** The recipe reference associated with this job, if any. */
  RecipeReference?: RecipeReference | null;
  /** The Amazon Resource Name (ARN) of the job. */
  ResourceArn?: string | null;
  /** The ARN of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job. */
  RoleArn?: string | null;
  /** Metadata tags associated with this job. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * The job's timeout in minutes.
   * A job that attempts to run longer than this timeout period ends with a status of `TIMEOUT`.
   */
  Timeout?: number | null;
  /**
   * Sample configuration for profile jobs only.
   * Determines the number of rows on which the profile job will be executed.
   */
  JobSample?: JobSample | null;
}

// refs: 1 - tags: named, output
/** Detailed metadata for a single run of a DataBrew job. */
export interface DescribeJobRunResponse {
  /** The number of times that DataBrew has attempted to run the job. */
  Attempt?: number | null;
  /** The date and time when the job completed processing. */
  CompletedOn?: Date | number | null;
  /** The name of the dataset for the job to process. */
  DatasetName?: string | null;
  /** A message indicating an error (if any) that was encountered when the job ran. */
  ErrorMessage?: string | null;
  /** The amount of time, in seconds, during which the job run consumed resources. */
  ExecutionTime?: number | null;
  /** The name of the job being processed during this run. */
  JobName: string;
  /**
   * Configuration for profile jobs.
   * Used to select columns, do evaluations, and override default parameters of evaluations.
   * When configuration is null, the profile job will run with default settings.
   */
  ProfileConfiguration?: ProfileConfiguration | null;
  /** List of validation configurations that are applied to the profile job. */
  ValidationConfigurations?: ValidationConfiguration[] | null;
  /** The unique identifier of the job run. */
  RunId?: string | null;
  /** The current state of the job run entity itself. */
  State?: JobRunState | null;
  /** The current status of Amazon CloudWatch logging for the job run. */
  LogSubscription?: LogSubscription | null;
  /** The name of an Amazon CloudWatch log group, where the job writes diagnostic messages when it runs. */
  LogGroupName?: string | null;
  /** One or more output artifacts from a job run. */
  Outputs?: Output[] | null;
  /** One or more artifacts that represent the Glue Data Catalog output from running the job. */
  DataCatalogOutputs?: DataCatalogOutput[] | null;
  /** Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into. */
  DatabaseOutputs?: DatabaseOutput[] | null;
  /** The recipe reference used by this job run, if any. */
  RecipeReference?: RecipeReference | null;
  /** The Amazon Resource Name (ARN) of the user who started the job run. */
  StartedBy?: string | null;
  /** The date and time when the job run began. */
  StartedOn?: Date | number | null;
  /**
   * Sample configuration for profile jobs only.
   * Determines the number of rows on which the profile job will be executed.
   * If a JobSample value is not provided, the default value will be used.
   * The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.
   */
  JobSample?: JobSample | null;
}

// refs: 1 - tags: named, output
/** Detailed metadata for a single DataBrew project. */
export interface DescribeProjectResponse {
  /** The date and time that the project was created. */
  CreateDate?: Date | number | null;
  /** The identifier (user name) of the user who created the project. */
  CreatedBy?: string | null;
  /** The dataset associated with the project. */
  DatasetName?: string | null;
  /** The date and time that the project was last modified. */
  LastModifiedDate?: Date | number | null;
  /** The identifier (user name) of the user who last modified the project. */
  LastModifiedBy?: string | null;
  /** The name of the project. */
  Name: string;
  /** The recipe associated with this job. */
  RecipeName?: string | null;
  /** The Amazon Resource Name (ARN) of the project. */
  ResourceArn?: string | null;
  /** The sample settings for the project (`Sample` type, defined elsewhere in this module). */
  Sample?: Sample | null;
  /** The ARN of the Identity and Access Management (IAM) role to be assumed when DataBrew runs the job. */
  RoleArn?: string | null;
  /** Metadata tags associated with this project. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * Describes the current state of the session:
   *
   *   - `PROVISIONING` - allocating resources for the session.
   *   - `INITIALIZING` - getting the session ready for first use.
   *   - `ASSIGNED` - the session is ready for use.
   */
  SessionStatus?: SessionStatus | null;
  /** The identifier (user name) of the user that opened the project for use. */
  OpenedBy?: string | null;
  /** The date and time when the project was opened. */
  OpenDate?: Date | number | null;
}

// refs: 1 - tags: named, output
/** Detailed metadata for a single DataBrew recipe (at a particular version). */
export interface DescribeRecipeResponse {
  /** The identifier (user name) of the user who created the recipe. */
  CreatedBy?: string | null;
  /** The date and time that the recipe was created. */
  CreateDate?: Date | number | null;
  /** The identifier (user name) of the user who last modified the recipe. */
  LastModifiedBy?: string | null;
  /** The date and time that the recipe was last modified. */
  LastModifiedDate?: Date | number | null;
  /** The name of the project associated with this recipe. */
  ProjectName?: string | null;
  /** The identifier (user name) of the user who last published the recipe. */
  PublishedBy?: string | null;
  /** The date and time when the recipe was last published. */
  PublishedDate?: Date | number | null;
  /** The description of the recipe. */
  Description?: string | null;
  /** The name of the recipe. */
  Name: string;
  /**
   * One or more steps to be performed by the recipe.
   * Each step consists of an action, and the conditions under which the action should succeed.
   */
  Steps?: RecipeStep[] | null;
  /** Metadata tags associated with this project. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The ARN of the recipe. */
  ResourceArn?: string | null;
  /** The recipe version identifier. */
  RecipeVersion?: string | null;
}

// refs: 1 - tags: named, output
/** Detailed metadata for a single DataBrew ruleset. */
export interface DescribeRulesetResponse {
  /** The name of the ruleset. */
  Name: string;
  /** The description of the ruleset. */
  Description?: string | null;
  /** The Amazon Resource Name (ARN) of a resource (dataset) that the ruleset is associated with. */
  TargetArn?: string | null;
  /**
   * A list of rules that are defined with the ruleset.
   * A rule includes one or more checks to be validated on a DataBrew dataset.
   */
  Rules?: Rule[] | null;
  /** The date and time that the ruleset was created. */
  CreateDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the user who created the ruleset. */
  CreatedBy?: string | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the ruleset. */
  LastModifiedBy?: string | null;
  /** The modification date and time of the ruleset. */
  LastModifiedDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) for the ruleset. */
  ResourceArn?: string | null;
  /** Metadata tags that have been applied to the ruleset. */
  Tags?: { [key: string]: string | null | undefined } | null;
}

// refs: 1 - tags: named, output
/** Detailed metadata for a single DataBrew schedule. */
export interface DescribeScheduleResponse {
  /** The date and time that the schedule was created. */
  CreateDate?: Date | number | null;
  /** The identifier (user name) of the user who created the schedule. */
  CreatedBy?: string | null;
  /** The name or names of one or more jobs to be run by using the schedule. */
  JobNames?: string[] | null;
  /** The identifier (user name) of the user who last modified the schedule. */
  LastModifiedBy?: string | null;
  /** The date and time that the schedule was last modified. */
  LastModifiedDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the schedule. */
  ResourceArn?: string | null;
  /**
   * The date or dates and time or times when the jobs are to be run for the schedule.
   * For more information, see [Cron expressions](https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html) in the _Glue DataBrew Developer Guide_.
   */
  CronExpression?: string | null;
  /** Metadata tags associated with this schedule. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The name of the schedule. */
  Name: string;
}

// refs: 1 - tags: named, output
/** One page of dataset summaries plus an optional pagination token. */
export interface ListDatasetsResponse {
  /** A list of datasets that are defined. */
  Datasets: Dataset[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** One page of job-run summaries plus an optional pagination token. */
export interface ListJobRunsResponse {
  /** A list of job runs that have occurred for the specified job. */
  JobRuns: JobRun[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** One page of job summaries plus an optional pagination token. */
export interface ListJobsResponse {
  /** A list of jobs that are defined. */
  Jobs: Job[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** One page of project summaries plus an optional pagination token. */
export interface ListProjectsResponse {
  /** A list of projects that are defined . */
  Projects: Project[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** One page of recipe versions plus an optional pagination token. */
export interface ListRecipeVersionsResponse {
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
  /** A list of versions for the specified recipe. */
  Recipes: Recipe[];
}

// refs: 1 - tags: named, output
/** One page of recipe summaries plus an optional pagination token. */
export interface ListRecipesResponse {
  /** A list of recipes that are defined. */
  Recipes: Recipe[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** One page of ruleset summaries plus an optional pagination token. */
export interface ListRulesetsResponse {
  /**
   * A list of RulesetItem.
   * RulesetItem contains meta data of a ruleset.
   */
  Rulesets: RulesetItem[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** One page of schedule summaries plus an optional pagination token. */
export interface ListSchedulesResponse {
  /** A list of schedules that are defined. */
  Schedules: Schedule[];
  /** A token that you can use in a subsequent call to retrieve the next set of results. */
  NextToken?: string | null;
}

// refs: 1 - tags: named, output
/** The set of tags attached to a DataBrew resource. */
export interface ListTagsForResourceResponse {
  /** A list of tags associated with the DataBrew resource. */
  Tags?: { [key: string]: string | null | undefined } | null;
}

// refs: 1 - tags: named, output
/** Output returned after a recipe is published. */
export interface PublishRecipeResponse {
  /** The name of the recipe that you published. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Result of performing an action within an interactive project session. */
export interface SendProjectSessionActionResponse {
  /** A message indicating the result of performing the action. */
  Result?: string | null;
  /** The name of the project that was affected by the action. */
  Name: string;
  /** A unique identifier for the action that was performed. */
  ActionId?: number | null;
}

// refs: 1 - tags: named, output
/** Identifies the job run that was started. */
export interface StartJobRunResponse {
  /** A system-generated identifier for this particular job run. */
  RunId: string;
}

// refs: 1 - tags: named, output
/** Output returned when an interactive project session is started. */
export interface StartProjectSessionResponse {
  /** The name of the project to be acted upon. */
  Name: string;
  /** A system-generated identifier for the session. */
  ClientSessionId?: string | null;
}

// refs: 1 - tags: named, output
/** Identifies the job run that was stopped. */
export interface StopJobRunResponse {
  /** The ID of the job run that you stopped. */
  RunId: string;
}

// refs: 1 - tags: named, output
/** Output returned after a dataset is updated. */
export interface UpdateDatasetResponse {
  /** The name of the dataset that you updated. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output returned after a profile job is updated. */
export interface UpdateProfileJobResponse {
  /** The name of the job that was updated. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output returned after a project is updated. */
export interface UpdateProjectResponse {
  /** The date and time that the project was last modified. */
  LastModifiedDate?: Date | number | null;
  /** The name of the project that you updated. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output returned after a recipe is updated. */
export interface UpdateRecipeResponse {
  /** The name of the recipe that was updated. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output returned after a recipe job is updated. */
export interface UpdateRecipeJobResponse {
  /** The name of the job that you updated. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output returned after a ruleset is updated. */
export interface UpdateRulesetResponse {
  /** The name of the updated ruleset. */
  Name: string;
}

// refs: 1 - tags: named, output
/** Output returned after a schedule is updated. */
export interface UpdateScheduleResponse {
  /** The name of the schedule that was updated. */
  Name: string;
}

// refs: 4 - tags: input, named, enum, output
/** File formats supported for a dataset created from an Amazon S3 file or folder. */
export type InputFormat =
| "CSV"
| "JSON"
| "PARQUET"
| "EXCEL"
| "ORC"
| cmnP.UnexpectedEnumValue;

// refs: 4 - tags: input, named, interface, output
/** Represents a set of options that define the structure of either comma-separated value (CSV), Excel, or JSON input. */
export interface FormatOptions {
  /** Options that define how JSON input is to be interpreted by DataBrew. */
  Json?: JsonOptions | null;
  /** Options that define how Excel input is to be interpreted by DataBrew. */
  Excel?: ExcelOptions | null;
  /** Options that define how CSV input is to be interpreted by DataBrew. */
  Csv?: CsvOptions | null;
}
/** Serializes FormatOptions to its JSON wire form; a null/undefined input passes through unchanged. */
function fromFormatOptions(input?: FormatOptions | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    Json: fromJsonOptions(input["Json"]),
    Excel: fromExcelOptions(input["Excel"]),
    Csv: fromCsvOptions(input["Csv"]),
  }
}
/** Decodes a JSON value into FormatOptions; all three members are optional. */
function toFormatOptions(root: jsonP.JSONValue): FormatOptions {
  return jsonP.readObj({
    required: {},
    optional: {
      "Json": toJsonOptions,
      "Excel": toExcelOptions,
      "Csv": toCsvOptions,
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents the JSON-specific options that define how input is to be interpreted by Glue DataBrew. */
export interface JsonOptions {
  /** A value that specifies whether JSON input contains embedded new line characters. */
  MultiLine?: boolean | null;
}
/** Serializes JsonOptions to its JSON wire form; a null/undefined input is returned as-is. */
function fromJsonOptions(input?: JsonOptions | null): jsonP.JSONValue {
  if (!input) return input;
  // Copy the single wire field via destructuring shorthand.
  const { MultiLine } = input;
  return { MultiLine };
}
/** Decodes a JSON value into JsonOptions; MultiLine is read as an optional boolean. */
function toJsonOptions(root: jsonP.JSONValue): JsonOptions {
  return jsonP.readObj({
    required: {},
    optional: {
      "MultiLine": "b",
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents a set of options that define how DataBrew will interpret a Microsoft Excel file when creating a dataset from that file. */
export interface ExcelOptions {
  /** One or more named sheets in the Excel file that will be included in the dataset. */
  SheetNames?: string[] | null;
  /** One or more sheet numbers in the Excel file that will be included in the dataset. */
  SheetIndexes?: number[] | null;
  /**
   * A variable that specifies whether the first row in the file is parsed as the header.
   * If this value is false, column names are auto-generated.
   */
  HeaderRow?: boolean | null;
}
/** Serializes ExcelOptions to its JSON wire form; a null/undefined input is returned as-is. */
function fromExcelOptions(input?: ExcelOptions | null): jsonP.JSONValue {
  if (!input) return input;
  // All three wire fields are verbatim copies of the corresponding members.
  const { SheetNames, SheetIndexes, HeaderRow } = input;
  return { SheetNames, SheetIndexes, HeaderRow };
}
/** Decodes a JSON value into ExcelOptions; sheet lists and HeaderRow are all optional. */
function toExcelOptions(root: jsonP.JSONValue): ExcelOptions {
  return jsonP.readObj({
    required: {},
    optional: {
      "SheetNames": ["s"],
      "SheetIndexes": ["n"],
      "HeaderRow": "b",
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents a set of options that define how DataBrew will read a comma-separated value (CSV) file when creating a dataset from that file. */
export interface CsvOptions {
  /** A single character that specifies the delimiter being used in the CSV file. */
  Delimiter?: string | null;
  /**
   * A variable that specifies whether the first row in the file is parsed as the header.
   * If this value is false, column names are auto-generated.
   */
  HeaderRow?: boolean | null;
}
/** Serializes CsvOptions to its JSON wire form; a null/undefined input is returned as-is. */
function fromCsvOptions(input?: CsvOptions | null): jsonP.JSONValue {
  if (!input) return input;
  // Both wire fields are verbatim copies of the corresponding members.
  const { Delimiter, HeaderRow } = input;
  return { Delimiter, HeaderRow };
}
/** Decodes a JSON value into CsvOptions; Delimiter and HeaderRow are both optional. */
function toCsvOptions(root: jsonP.JSONValue): CsvOptions {
  return jsonP.readObj({
    required: {},
    optional: {
      "Delimiter": "s",
      "HeaderRow": "b",
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents information on how DataBrew can find data, in either the Glue Data Catalog or Amazon S3. */
export interface Input {
  /** The Amazon S3 location where the data is stored. */
  S3InputDefinition?: S3Location | null;
  /** The Glue Data Catalog parameters for the data. */
  DataCatalogInputDefinition?: DataCatalogInputDefinition | null;
  /** Connection information for dataset input files stored in a database. */
  DatabaseInputDefinition?: DatabaseInputDefinition | null;
  /** Contains additional resource information needed for specific datasets. */
  Metadata?: Metadata | null;
}
/** Serializes an Input to its JSON wire form; a null/undefined input passes through unchanged. */
function fromInput(input?: Input | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    S3InputDefinition: fromS3Location(input["S3InputDefinition"]),
    DataCatalogInputDefinition: fromDataCatalogInputDefinition(input["DataCatalogInputDefinition"]),
    DatabaseInputDefinition: fromDatabaseInputDefinition(input["DatabaseInputDefinition"]),
    Metadata: fromMetadata(input["Metadata"]),
  }
}
/** Decodes a JSON value into an Input; every member is optional. */
function toInput(root: jsonP.JSONValue): Input {
  return jsonP.readObj({
    required: {},
    optional: {
      "S3InputDefinition": toS3Location,
      "DataCatalogInputDefinition": toDataCatalogInputDefinition,
      "DatabaseInputDefinition": toDatabaseInputDefinition,
      "Metadata": toMetadata,
    },
  }, root);
}

// refs: 38 - tags: input, named, interface, output
/** Represents an Amazon S3 location (bucket name, bucket owner, and object key) where DataBrew can read input data, or write output from a job. */
export interface S3Location {
  /** The Amazon S3 bucket name. */
  Bucket: string;
  /** The unique name of the object in the bucket. */
  Key?: string | null;
  /** The Amazon Web Services account ID of the bucket owner. */
  BucketOwner?: string | null;
}
/** Serializes an S3Location to its JSON wire form; a null/undefined input is returned as-is. */
function fromS3Location(input?: S3Location | null): jsonP.JSONValue {
  if (!input) return input;
  // All wire fields are verbatim copies of the corresponding members.
  const { Bucket, Key, BucketOwner } = input;
  return { Bucket, Key, BucketOwner };
}
/** Decodes a JSON value into an S3Location; Bucket is required, Key and BucketOwner are optional. */
function toS3Location(root: jsonP.JSONValue): S3Location {
  return jsonP.readObj({
    required: {
      "Bucket": "s",
    },
    optional: {
      "Key": "s",
      "BucketOwner": "s",
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents how metadata stored in the Glue Data Catalog is defined in a DataBrew dataset. */
export interface DataCatalogInputDefinition {
  /** The unique identifier of the Amazon Web Services account that holds the Data Catalog that stores the data. */
  CatalogId?: string | null;
  /** The name of a database in the Data Catalog. */
  DatabaseName: string;
  /**
   * The name of a database table in the Data Catalog.
   * This table corresponds to a DataBrew dataset.
   */
  TableName: string;
  /** Represents an Amazon location where DataBrew can store intermediate results. */
  TempDirectory?: S3Location | null;
}
/** Serializes a DataCatalogInputDefinition to its JSON wire form; a null/undefined input passes through unchanged. */
function fromDataCatalogInputDefinition(input?: DataCatalogInputDefinition | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    CatalogId: input["CatalogId"],
    DatabaseName: input["DatabaseName"],
    TableName: input["TableName"],
    TempDirectory: fromS3Location(input["TempDirectory"]),
  }
}
/** Decodes a JSON value into a DataCatalogInputDefinition; DatabaseName and TableName are required. */
function toDataCatalogInputDefinition(root: jsonP.JSONValue): DataCatalogInputDefinition {
  return jsonP.readObj({
    required: {
      "DatabaseName": "s",
      "TableName": "s",
    },
    optional: {
      "CatalogId": "s",
      "TempDirectory": toS3Location,
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Connection information for dataset input files stored in a database. */
export interface DatabaseInputDefinition {
  /** The Glue Connection that stores the connection information for the target database. */
  GlueConnectionName: string;
  /** The table within the target database. */
  DatabaseTableName?: string | null;
  /** An Amazon S3 location where DataBrew can store intermediate results. */
  TempDirectory?: S3Location | null;
  /**
   * Custom SQL to run against the provided Glue connection.
   * This SQL will be used as the input for DataBrew projects and jobs.
   */
  QueryString?: string | null;
}
/** Serializes a DatabaseInputDefinition to its JSON wire form; a null/undefined input passes through unchanged. */
function fromDatabaseInputDefinition(input?: DatabaseInputDefinition | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    GlueConnectionName: input["GlueConnectionName"],
    DatabaseTableName: input["DatabaseTableName"],
    TempDirectory: fromS3Location(input["TempDirectory"]),
    QueryString: input["QueryString"],
  }
}
/** Decodes a JSON value into a DatabaseInputDefinition; GlueConnectionName is required. */
function toDatabaseInputDefinition(root: jsonP.JSONValue): DatabaseInputDefinition {
  return jsonP.readObj({
    required: {
      "GlueConnectionName": "s",
    },
    optional: {
      "DatabaseTableName": "s",
      "TempDirectory": toS3Location,
      "QueryString": "s",
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Contains additional resource information needed for specific datasets. */
export interface Metadata {
  /**
   * The Amazon Resource Name (ARN) associated with the dataset.
   * Currently, DataBrew only supports ARNs from Amazon AppFlow.
   */
  SourceArn?: string | null;
}
/** Serializes a Metadata record to its JSON wire form; a null/undefined input is returned as-is. */
function fromMetadata(input?: Metadata | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    SourceArn: input["SourceArn"],
  }
}
/** Decodes a JSON value into a Metadata record; SourceArn is an optional string. */
function toMetadata(root: jsonP.JSONValue): Metadata {
  return jsonP.readObj({
    required: {},
    optional: {
      "SourceArn": "s",
    },
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents a set of options that define how DataBrew selects files for a given Amazon S3 path in a dataset. */
export interface PathOptions {
  /** If provided, this structure defines a date range for matching Amazon S3 objects based on their LastModifiedDate attribute in Amazon S3. */
  LastModifiedDateCondition?: FilterExpression | null;
  /** If provided, this structure imposes a limit on a number of files that should be selected. */
  FilesLimit?: FilesLimit | null;
  /** A structure that maps names of parameters used in the Amazon S3 path of a dataset to their definitions. */
  Parameters?: { [key: string]: DatasetParameter | null | undefined } | null;
}
/** Serializes PathOptions to its JSON wire form; the Parameters map is serialized entry-by-entry. */
function fromPathOptions(input?: PathOptions | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    LastModifiedDateCondition: fromFilterExpression(input["LastModifiedDateCondition"]),
    FilesLimit: fromFilesLimit(input["FilesLimit"]),
    Parameters: jsonP.serializeMap(input["Parameters"], x => fromDatasetParameter(x)),
  }
}
/** Decodes a JSON value into PathOptions; all members optional, Parameters read as a string-keyed map. */
function toPathOptions(root: jsonP.JSONValue): PathOptions {
  return jsonP.readObj({
    required: {},
    optional: {
      "LastModifiedDateCondition": toFilterExpression,
      "FilesLimit": toFilesLimit,
      "Parameters": x => jsonP.readMap(String, toDatasetParameter, x),
    },
  }, root);
}

// refs: 8 - tags: input, named, interface, output
/**
 * Represents a structure for defining parameter conditions.
 * Supported conditions are described here: [Supported conditions for dynamic datasets](https://docs.aws.amazon.com/databrew/latest/dg/datasets.multiple-files.html#conditions.for.dynamic.datasets) in the _Glue DataBrew Developer Guide_.
 */
export interface FilterExpression {
  /**
   * The expression which includes condition names followed by substitution variables, possibly grouped and combined with other conditions.
   * For example, "(starts_with :prefix1 or starts_with :prefix2) and (ends_with :suffix1 or ends_with :suffix2)".
   * Substitution variables should start with ':' symbol.
   */
  Expression: string;
  /** The map of substitution variable names to their values used in this filter expression. */
  ValuesMap: { [key: string]: string | null | undefined };
}
/** Serializes a FilterExpression to its JSON wire form; a null/undefined input is returned as-is. */
function fromFilterExpression(input?: FilterExpression | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    Expression: input["Expression"],
    ValuesMap: input["ValuesMap"],
  }
}
/** Decodes a JSON value into a FilterExpression; Expression and ValuesMap are both required. */
function toFilterExpression(root: jsonP.JSONValue): FilterExpression {
  return jsonP.readObj({
    required: {
      "Expression": "s",
      "ValuesMap": x => jsonP.readMap(String, String, x),
    },
    optional: {},
  }, root);
}

// refs: 4 - tags: input, named, interface, output
/** Represents a limit imposed on number of Amazon S3 files that should be selected for a dataset from a connected Amazon S3 path. */
export interface FilesLimit {
  /** The number of Amazon S3 files to select. */
  MaxFiles: number;
  /**
   * A criteria to use for Amazon S3 files sorting before their selection.
   * By default uses LAST_MODIFIED_DATE as a sorting criteria.
   * Currently it's the only allowed value.
   */
  OrderedBy?: OrderedBy | null;
  /**
   * A criteria to use for Amazon S3 files sorting before their selection.
   * By default uses DESCENDING order, i.e.
   * most recent files are selected first.
   * Another possible value is ASCENDING.
   */
  Order?: Order | null;
}
/** Serializes a FilesLimit to its JSON wire form; a null/undefined input is returned as-is. */
function fromFilesLimit(input?: FilesLimit | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    MaxFiles: input["MaxFiles"],
    OrderedBy: input["OrderedBy"],
    Order: input["Order"],
  }
}
/** Decodes a JSON value into a FilesLimit; MaxFiles is required, the enum members are read via cmnP.readEnum. */
function toFilesLimit(root: jsonP.JSONValue): FilesLimit {
  return jsonP.readObj({
    required: {
      "MaxFiles": "n",
    },
    optional: {
      "OrderedBy": (x: jsonP.JSONValue) => cmnP.readEnum<OrderedBy>(x),
      "Order": (x: jsonP.JSONValue) => cmnP.readEnum<Order>(x),
    },
  }, root);
}

// refs: 4 - tags: input, named, enum, output
/** Sorting criteria applied to Amazon S3 files before selection; LAST_MODIFIED_DATE is currently the only defined value. */
export type OrderedBy =
| "LAST_MODIFIED_DATE"
| cmnP.UnexpectedEnumValue;

// refs: 4 - tags: input, named, enum, output
/** Sort direction for Amazon S3 file selection. */
export type Order =
| "DESCENDING"
| "ASCENDING"
| cmnP.UnexpectedEnumValue;

// refs: 4 - tags: input, named, interface, output
/** Represents a dataset parameter that defines type and conditions for a parameter in the Amazon S3 path of the dataset. */
export interface DatasetParameter {
  /** The name of the parameter that is used in the dataset's Amazon S3 path. */
  Name: string;
  /** The type of the dataset parameter, can be one of a 'String', 'Number' or 'Datetime'. */
  Type: ParameterType;
  /**
   * Additional parameter options such as a format and a timezone.
   * Required for datetime parameters.
   */
  DatetimeOptions?: DatetimeOptions | null;
  /** Optional boolean value that defines whether the captured value of this parameter should be used to create a new column in a dataset. */
  CreateColumn?: boolean | null;
  /** The optional filter expression structure to apply additional matching criteria to the parameter. */
  Filter?: FilterExpression | null;
}
/** Serializes a DatasetParameter to its JSON wire form; a null/undefined input passes through unchanged. */
function fromDatasetParameter(input?: DatasetParameter | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    Name: input["Name"],
    Type: input["Type"],
    DatetimeOptions: fromDatetimeOptions(input["DatetimeOptions"]),
    CreateColumn: input["CreateColumn"],
    Filter: fromFilterExpression(input["Filter"]),
  }
}
/** Decodes a JSON value into a DatasetParameter; Name and Type are required. */
function toDatasetParameter(root: jsonP.JSONValue): DatasetParameter {
  return jsonP.readObj({
    required: {
      "Name": "s",
      "Type": (x: jsonP.JSONValue) => cmnP.readEnum<ParameterType>(x),
    },
    optional: {
      "DatetimeOptions": toDatetimeOptions,
      "CreateColumn": "b",
      "Filter": toFilterExpression,
    },
  }, root);
}

// refs: 4 - tags: input, named, enum, output
/** The data type of a dataset path parameter. */
export type ParameterType =
| "Datetime"
| "Number"
| "String"
| cmnP.UnexpectedEnumValue;

// refs: 4 - tags: input, named, interface, output
/** Represents additional options for correct interpretation of datetime parameters used in the Amazon S3 path of a dataset. */
export interface DatetimeOptions {
  /**
   * Required option, that defines the datetime format used for a date parameter in the Amazon S3 path.
   * Should use only supported datetime specifiers and separation characters, all literal a-z or A-Z characters should be escaped with single quotes.
   * E.g.
   * "MM.dd.yyyy-'at'-HH:mm".
   */
  Format: string;
  /**
   * Optional value for a timezone offset of the datetime parameter value in the Amazon S3 path.
   * Shouldn't be used if Format for this parameter includes timezone fields.
   * If no offset specified, UTC is assumed.
   */
  TimezoneOffset?: string | null;
  /** Optional value for a non-US locale code, needed for correct interpretation of some date formats. */
  LocaleCode?: string | null;
}
/** Serializes DatetimeOptions to its JSON wire form; a null/undefined input is returned as-is. */
function fromDatetimeOptions(input?: DatetimeOptions | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    Format: input["Format"],
    TimezoneOffset: input["TimezoneOffset"],
    LocaleCode: input["LocaleCode"],
  }
}
/** Decodes a JSON value into DatetimeOptions; Format is required. */
function toDatetimeOptions(root: jsonP.JSONValue): DatetimeOptions {
  return jsonP.readObj({
    required: {
      "Format": "s",
    },
    optional: {
      "TimezoneOffset": "s",
      "LocaleCode": "s",
    },
  }, root);
}

// refs: 6 - tags: input, named, enum, output
/** Server-side encryption mode for job output: KMS-managed (SSE-KMS) or S3-managed (SSE-S3) keys. */
export type EncryptionMode =
| "SSE-KMS"
| "SSE-S3"
| cmnP.UnexpectedEnumValue;

// refs: 8 - tags: input, named, enum, output
/** Enables or disables log subscription for a job or project. */
export type LogSubscription =
| "ENABLE"
| "DISABLE"
| cmnP.UnexpectedEnumValue;

// refs: 4 - tags: input, named, interface, output
/**
 * Configuration for profile jobs.
 * Configuration can be used to select columns, do evaluations, and override default parameters of evaluations.
 * When configuration is undefined, the profile job will apply default settings to all supported columns.
 */
export interface ProfileConfiguration {
  /**
   * Configuration for inter-column evaluations.
   * Configuration can be used to select evaluations and override parameters of evaluations.
   * When configuration is undefined, the profile job will run all supported inter-column evaluations.
   */
  DatasetStatisticsConfiguration?: StatisticsConfiguration | null;
  /**
   * List of column selectors.
   * ProfileColumns can be used to select columns from the dataset.
   * When ProfileColumns is undefined, the profile job will profile all supported columns.
   */
  ProfileColumns?: ColumnSelector[] | null;
  /**
   * List of configurations for column evaluations.
   * ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns.
   * When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
   */
  ColumnStatisticsConfigurations?: ColumnStatisticsConfiguration[] | null;
  /**
   * Configuration of entity detection for a profile job.
   * When undefined, entity detection is disabled.
   */
  EntityDetectorConfiguration?: EntityDetectorConfiguration | null;
}
/** Serializes a ProfileConfiguration into its JSON wire shape (nulls pass through). */
function fromProfileConfiguration(value?: ProfileConfiguration | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    DatasetStatisticsConfiguration: fromStatisticsConfiguration(value.DatasetStatisticsConfiguration),
    ProfileColumns: value.ProfileColumns?.map(fromColumnSelector),
    ColumnStatisticsConfigurations: value.ColumnStatisticsConfigurations?.map(fromColumnStatisticsConfiguration),
    EntityDetectorConfiguration: fromEntityDetectorConfiguration(value.EntityDetectorConfiguration),
  };
}
/** Reads a ProfileConfiguration back out of a JSON response payload. */
function toProfileConfiguration(raw: jsonP.JSONValue): ProfileConfiguration {
  return jsonP.readObj({
    optional: {
      DatasetStatisticsConfiguration: toStatisticsConfiguration,
      ProfileColumns: [toColumnSelector],
      ColumnStatisticsConfigurations: [toColumnStatisticsConfiguration],
      EntityDetectorConfiguration: toEntityDetectorConfiguration,
    },
    required: {},
  }, raw);
}

// refs: 8 - tags: input, named, interface, output
/**
 * Configuration of evaluations for a profile job.
 * This configuration can be used to select evaluations and override the parameters of selected evaluations.
 */
export interface StatisticsConfiguration {
  /**
   * List of included evaluations.
   * When the list is undefined, all supported evaluations will be included.
   */
  IncludedStatistics?: string[] | null;
  /** List of overrides for evaluations. */
  Overrides?: StatisticOverride[] | null;
}
/** Serializes a StatisticsConfiguration into its JSON wire shape (nulls pass through). */
function fromStatisticsConfiguration(value?: StatisticsConfiguration | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    IncludedStatistics: value.IncludedStatistics,
    Overrides: value.Overrides?.map(fromStatisticOverride),
  };
}
/** Reads a StatisticsConfiguration back out of a JSON response payload. */
function toStatisticsConfiguration(raw: jsonP.JSONValue): StatisticsConfiguration {
  return jsonP.readObj({
    optional: {
      IncludedStatistics: ["s"],
      Overrides: [toStatisticOverride],
    },
    required: {},
  }, raw);
}

// refs: 8 - tags: input, named, interface, output
/** Override of a particular evaluation for a profile job. */
export interface StatisticOverride {
  /** The name of an evaluation */
  Statistic: string;
  /** A map that includes overrides of an evaluation’s parameters. */
  Parameters: { [key: string]: string | null | undefined };
}
/** Serializes a StatisticOverride into its JSON wire shape (nulls pass through). */
function fromStatisticOverride(value?: StatisticOverride | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Statistic: value.Statistic,
    Parameters: value.Parameters,
  };
}
/** Reads a StatisticOverride back out of a JSON response payload. */
function toStatisticOverride(raw: jsonP.JSONValue): StatisticOverride {
  return jsonP.readObj({
    optional: {},
    required: {
      Statistic: "s",
      Parameters: v => jsonP.readMap(String, String, v),
    },
  }, raw);
}

// refs: 11 - tags: input, named, interface, output
/**
 * Selector of a column from a dataset for profile job configuration.
 * One selector includes either a column name or a regular expression.
 */
export interface ColumnSelector {
  /** A regular expression for selecting a column from a dataset. */
  Regex?: string | null;
  /** The name of a column from a dataset. */
  Name?: string | null;
}
/** Serializes a ColumnSelector into its JSON wire shape (nulls pass through). */
function fromColumnSelector(value?: ColumnSelector | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Regex: value.Regex,
    Name: value.Name,
  };
}
/** Reads a ColumnSelector back out of a JSON response payload. */
function toColumnSelector(raw: jsonP.JSONValue): ColumnSelector {
  return jsonP.readObj({
    optional: {
      Regex: "s",
      Name: "s",
    },
    required: {},
  }, raw);
}

// refs: 4 - tags: input, named, interface, output
/**
 * Configuration for column evaluations for a profile job.
 * ColumnStatisticsConfiguration can be used to select evaluations and override parameters of evaluations for particular columns.
 */
export interface ColumnStatisticsConfiguration {
  /**
   * List of column selectors.
   * Selectors can be used to select columns from the dataset.
   * When selectors are undefined, configuration will be applied to all supported columns.
   */
  Selectors?: ColumnSelector[] | null;
  /**
   * Configuration for evaluations.
   * Statistics can be used to select evaluations and override parameters of evaluations.
   */
  Statistics: StatisticsConfiguration;
}
/** Serializes a ColumnStatisticsConfiguration into its JSON wire shape (nulls pass through). */
function fromColumnStatisticsConfiguration(value?: ColumnStatisticsConfiguration | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Selectors: value.Selectors?.map(fromColumnSelector),
    Statistics: fromStatisticsConfiguration(value.Statistics),
  };
}
/** Reads a ColumnStatisticsConfiguration back out of a JSON response payload. */
function toColumnStatisticsConfiguration(raw: jsonP.JSONValue): ColumnStatisticsConfiguration {
  return jsonP.readObj({
    optional: {
      Selectors: [toColumnSelector],
    },
    required: {
      Statistics: toStatisticsConfiguration,
    },
  }, raw);
}

// refs: 4 - tags: input, named, interface, output
/**
 * Configuration of entity detection for a profile job.
 * When undefined, entity detection is disabled.
 */
export interface EntityDetectorConfiguration {
  /**
   * Entity types to detect.
   * Can be any of the following:
   *
   *   - USA_SSN
   *   - EMAIL
   *   - USA_ITIN
   *   - USA_PASSPORT_NUMBER
   *   - PHONE_NUMBER
   *   - USA_DRIVING_LICENSE
   *   - BANK_ACCOUNT
   *   - CREDIT_CARD
   *   - IP_ADDRESS
   *   - MAC_ADDRESS
   *   - USA_DEA_NUMBER
   *   - USA_HCPCS_CODE
   *   - USA_NATIONAL_PROVIDER_IDENTIFIER
   *   - USA_NATIONAL_DRUG_CODE
   *   - USA_HEALTH_INSURANCE_CLAIM_NUMBER
   *   - USA_MEDICARE_BENEFICIARY_IDENTIFIER
   *   - USA_CPT_CODE
   *   - PERSON_NAME
   *   - DATE
   *
   * The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
   */
  EntityTypes: string[];
  /**
   * Configuration of statistics that are allowed to be run on columns that contain detected entities.
   * When undefined, no statistics will be computed on columns that contain detected entities.
   */
  AllowedStatistics?: AllowedStatistics[] | null;
}
/** Serializes an EntityDetectorConfiguration into its JSON wire shape (nulls pass through). */
function fromEntityDetectorConfiguration(value?: EntityDetectorConfiguration | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    EntityTypes: value.EntityTypes,
    AllowedStatistics: value.AllowedStatistics?.map(fromAllowedStatistics),
  };
}
/** Reads an EntityDetectorConfiguration back out of a JSON response payload. */
function toEntityDetectorConfiguration(raw: jsonP.JSONValue): EntityDetectorConfiguration {
  return jsonP.readObj({
    optional: {
      AllowedStatistics: [toAllowedStatistics],
    },
    required: {
      EntityTypes: ["s"],
    },
  }, raw);
}

// refs: 4 - tags: input, named, interface, output
/**
 * Configuration of statistics that are allowed to be run on columns that contain detected entities.
 * When undefined, no statistics will be computed on columns that contain detected entities.
 */
export interface AllowedStatistics {
  /** One or more column statistics to allow for columns that contain detected entities. */
  Statistics: string[];
}
/** Serializes an AllowedStatistics into its JSON wire shape (nulls pass through). */
function fromAllowedStatistics(value?: AllowedStatistics | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Statistics: value.Statistics,
  };
}
/** Reads an AllowedStatistics back out of a JSON response payload. */
function toAllowedStatistics(raw: jsonP.JSONValue): AllowedStatistics {
  return jsonP.readObj({
    optional: {},
    required: {
      Statistics: ["s"],
    },
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/**
 * Configuration for data quality validation.
 * Used to select the Rulesets and Validation Mode to be used in the profile job.
 * When ValidationConfiguration is null, the profile job will run without data quality validation.
 */
export interface ValidationConfiguration {
  /**
   * The Amazon Resource Name (ARN) for the ruleset to be validated in the profile job.
   * The TargetArn of the selected ruleset should be the same as the Amazon Resource Name (ARN) of the dataset that is associated with the profile job.
   */
  RulesetArn: string;
  /**
   * Mode of data quality validation.
   * Default mode is “CHECK_ALL” which verifies all rules defined in the selected ruleset.
   */
  ValidationMode?: ValidationMode | null;
}
/** Serializes a ValidationConfiguration into its JSON wire shape (nulls pass through). */
function fromValidationConfiguration(value?: ValidationConfiguration | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    RulesetArn: value.RulesetArn,
    ValidationMode: value.ValidationMode,
  };
}
/** Reads a ValidationConfiguration back out of a JSON response payload. */
function toValidationConfiguration(raw: jsonP.JSONValue): ValidationConfiguration {
  return jsonP.readObj({
    optional: {
      ValidationMode: (v: jsonP.JSONValue) => cmnP.readEnum<ValidationMode>(v),
    },
    required: {
      RulesetArn: "s",
    },
  }, raw);
}

// refs: 6 - tags: input, named, enum, output
/** Mode of data quality validation; CHECK_ALL verifies all rules defined in the selected ruleset. */
export type ValidationMode =
| "CHECK_ALL"
| cmnP.UnexpectedEnumValue;

// refs: 6 - tags: input, named, interface, output
/**
 * A sample configuration for profile jobs only, which determines the number of rows on which the profile job is run.
 * If a `JobSample` value isn't provided, the default is used.
 * The default value is CUSTOM_ROWS for the mode parameter and 20,000 for the size parameter.
 */
export interface JobSample {
  /**
   * A value that determines whether the profile job is run on the entire dataset or a specified number of rows.
   * This value must be one of the following:
   *
   *   - FULL_DATASET - The profile job is run on the entire dataset.
   *   - CUSTOM_ROWS - The profile job is run on the number of rows specified in the `Size` parameter.
   */
  Mode?: SampleMode | null;
  /**
   * The `Size` parameter is only required when the mode is CUSTOM_ROWS.
   * The profile job is run on the specified number of rows.
   * The maximum value for size is Long.MAX_VALUE.
   *
   * Long.MAX_VALUE = 9223372036854775807
   */
  Size?: number | null;
}
/** Serializes a JobSample into its JSON wire shape (nulls pass through). */
function fromJobSample(value?: JobSample | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Mode: value.Mode,
    Size: value.Size,
  };
}
/** Reads a JobSample back out of a JSON response payload. */
function toJobSample(raw: jsonP.JSONValue): JobSample {
  return jsonP.readObj({
    optional: {
      Mode: (v: jsonP.JSONValue) => cmnP.readEnum<SampleMode>(v),
      Size: "n",
    },
    required: {},
  }, raw);
}

// refs: 6 - tags: input, named, enum, output
/** Whether a profile job runs on the entire dataset or on a custom number of rows. */
export type SampleMode =
| "FULL_DATASET"
| "CUSTOM_ROWS"
| cmnP.UnexpectedEnumValue;

// refs: 4 - tags: input, named, interface, output
/** Represents the sample size and sampling type for DataBrew to use for interactive data analysis. */
export interface Sample {
  /** The number of rows in the sample. */
  Size?: number | null;
  /** The way in which DataBrew obtains rows from a dataset. */
  Type: SampleType;
}
/** Serializes a Sample into its JSON wire shape (nulls pass through). */
function fromSample(value?: Sample | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Size: value.Size,
    Type: value.Type,
  };
}
/** Reads a Sample back out of a JSON response payload. */
function toSample(raw: jsonP.JSONValue): Sample {
  return jsonP.readObj({
    optional: {
      Size: "n",
    },
    required: {
      Type: (v: jsonP.JSONValue) => cmnP.readEnum<SampleType>(v),
    },
  }, raw);
}

// refs: 4 - tags: input, named, enum, output
/** How DataBrew obtains sample rows from a dataset: first N, last N, or random. */
export type SampleType =
| "FIRST_N"
| "LAST_N"
| "RANDOM"
| cmnP.UnexpectedEnumValue;

// refs: 6 - tags: input, named, interface, output
/** Represents a single step from a DataBrew recipe to be performed. */
export interface RecipeStep {
  /** The particular action to be performed in the recipe step. */
  Action: RecipeAction;
  /**
   * One or more conditions that must be met for the recipe step to succeed.
   *
   *   Note:
   *   All of the conditions in the array must be met.
   *   In other words, all of the conditions must be combined using a logical AND operation.
   */
  ConditionExpressions?: ConditionExpression[] | null;
}
/** Serializes a RecipeStep into its JSON wire shape (nulls pass through). */
function fromRecipeStep(value?: RecipeStep | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Action: fromRecipeAction(value.Action),
    ConditionExpressions: value.ConditionExpressions?.map(fromConditionExpression),
  };
}
/** Reads a RecipeStep back out of a JSON response payload. */
function toRecipeStep(raw: jsonP.JSONValue): RecipeStep {
  return jsonP.readObj({
    optional: {
      ConditionExpressions: [toConditionExpression],
    },
    required: {
      Action: toRecipeAction,
    },
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/**
 * Represents a transformation and associated parameters that are used to apply a change to a DataBrew dataset.
 * For more information, see [Recipe actions reference](https://docs.aws.amazon.com/databrew/latest/dg/recipe-actions-reference.html).
 */
export interface RecipeAction {
  /** The name of a valid DataBrew transformation to be performed on the data. */
  Operation: string;
  /** Contextual parameters for the transformation. */
  Parameters?: { [key: string]: string | null | undefined } | null;
}
/** Serializes a RecipeAction into its JSON wire shape (nulls pass through). */
function fromRecipeAction(value?: RecipeAction | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Operation: value.Operation,
    Parameters: value.Parameters,
  };
}
/** Reads a RecipeAction back out of a JSON response payload. */
function toRecipeAction(raw: jsonP.JSONValue): RecipeAction {
  return jsonP.readObj({
    optional: {
      Parameters: v => jsonP.readMap(String, String, v),
    },
    required: {
      Operation: "s",
    },
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/**
 * Represents an individual condition that evaluates to true or false.
 *
 * Conditions are used with recipe actions.
 * The action is only performed for column values where the condition evaluates to true.
 *
 * If a recipe requires more than one condition, then the recipe must specify multiple `ConditionExpression` elements.
 * Each condition is applied to the rows in a dataset first, before the recipe action is performed.
 */
export interface ConditionExpression {
  /**
   * A specific condition to apply to a recipe action.
   * For more information, see [Recipe structure](https://docs.aws.amazon.com/databrew/latest/dg/recipes.html#recipes.structure) in the _Glue DataBrew Developer Guide_.
   */
  Condition: string;
  /** A value that the condition must evaluate to for the condition to succeed. */
  Value?: string | null;
  /** A column to apply this condition to. */
  TargetColumn: string;
}
/** Serializes a ConditionExpression into its JSON wire shape (nulls pass through). */
function fromConditionExpression(value?: ConditionExpression | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Condition: value.Condition,
    Value: value.Value,
    TargetColumn: value.TargetColumn,
  };
}
/** Reads a ConditionExpression back out of a JSON response payload. */
function toConditionExpression(raw: jsonP.JSONValue): ConditionExpression {
  return jsonP.readObj({
    optional: {
      Value: "s",
    },
    required: {
      Condition: "s",
      TargetColumn: "s",
    },
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/** Represents options that specify how and where in Amazon S3 DataBrew writes the output generated by recipe jobs or profile jobs. */
export interface Output {
  /** The compression algorithm used to compress the output text of the job. */
  CompressionFormat?: CompressionFormat | null;
  /** The data format of the output of the job. */
  Format?: OutputFormat | null;
  /** The names of one or more partition columns for the output of the job. */
  PartitionColumns?: string[] | null;
  /** The location in Amazon S3 where the job writes its output. */
  Location: S3Location;
  /** A value that, if true, means that any data in the location specified for output is overwritten with new output. */
  Overwrite?: boolean | null;
  /** Represents options that define how DataBrew formats job output files. */
  FormatOptions?: OutputFormatOptions | null;
  /**
   * Maximum number of files to be generated by the job and written to the output folder.
   * For output partitioned by column(s), the MaxOutputFiles value is the maximum number of files per partition.
   */
  MaxOutputFiles?: number | null;
}
function fromOutput(input?: Output | null): jsonP.JSONValue {
  if (!input) return input;
  return {
    CompressionFormat: input["CompressionFormat"],
    Format: input["Format"],
    PartitionColumns: input["PartitionColumns"],
    Location: fromS3Location(input["Location"]),
    Overwrite: input["Overwrite"],
    FormatOptions: fromOutputFormatOptions(input["FormatOptions"]),
    MaxOutputFiles: input["MaxOutputFiles"],
  }
}
function toOutput(root: jsonP.JSONValue): Output {
  return jsonP.readObj({
    required: {
      "Location": toS3Location,
    },
    optional: {
      "CompressionFormat": (x: jsonP.JSONValue) => cmnP.readEnum<CompressionFormat>(x),
      "Format": (x: jsonP.JSONValue) => cmnP.readEnum<OutputFormat>(x),
      "PartitionColumns": ["s"],
      "Overwrite": "b",
      "FormatOptions": toOutputFormatOptions,
      "MaxOutputFiles": "n",
    },
  }, root);
}

// refs: 6 - tags: input, named, enum, output
/** Compression algorithm used to compress the output text of a job. */
export type CompressionFormat =
| "GZIP"
| "LZ4"
| "SNAPPY"
| "BZIP2"
| "DEFLATE"
| "LZO"
| "BROTLI"
| "ZSTD"
| "ZLIB"
| cmnP.UnexpectedEnumValue;

// refs: 6 - tags: input, named, enum, output
/** Data format of the output of a job. */
export type OutputFormat =
| "CSV"
| "JSON"
| "PARQUET"
| "GLUEPARQUET"
| "AVRO"
| "ORC"
| "XML"
| "TABLEAUHYPER"
| cmnP.UnexpectedEnumValue;

// refs: 6 - tags: input, named, interface, output
/** Represents a set of options that define the structure of comma-separated (CSV) job output. */
export interface OutputFormatOptions {
  /** Represents a set of options that define the structure of comma-separated value (CSV) job output. */
  Csv?: CsvOutputOptions | null;
}
/** Serializes an OutputFormatOptions into its JSON wire shape (nulls pass through). */
function fromOutputFormatOptions(value?: OutputFormatOptions | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Csv: fromCsvOutputOptions(value.Csv),
  };
}
/** Reads an OutputFormatOptions back out of a JSON response payload. */
function toOutputFormatOptions(raw: jsonP.JSONValue): OutputFormatOptions {
  return jsonP.readObj({
    optional: {
      Csv: toCsvOutputOptions,
    },
    required: {},
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/** Represents a set of options that define how DataBrew will write a comma-separated value (CSV) file. */
export interface CsvOutputOptions {
  /** A single character that specifies the delimiter used to create CSV job output. */
  Delimiter?: string | null;
}
/** Serializes a CsvOutputOptions into its JSON wire shape (nulls pass through). */
function fromCsvOutputOptions(value?: CsvOutputOptions | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Delimiter: value.Delimiter,
  };
}
/** Reads a CsvOutputOptions back out of a JSON response payload. */
function toCsvOutputOptions(raw: jsonP.JSONValue): CsvOutputOptions {
  return jsonP.readObj({
    optional: {
      Delimiter: "s",
    },
    required: {},
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/** Represents options that specify how and where in the Glue Data Catalog DataBrew writes the output generated by recipe jobs. */
export interface DataCatalogOutput {
  /** The unique identifier of the Amazon Web Services account that holds the Data Catalog that stores the data. */
  CatalogId?: string | null;
  /** The name of a database in the Data Catalog. */
  DatabaseName: string;
  /** The name of a table in the Data Catalog. */
  TableName: string;
  /** Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs. */
  S3Options?: S3TableOutputOptions | null;
  /** Represents options that specify how and where DataBrew writes the database output generated by recipe jobs. */
  DatabaseOptions?: DatabaseTableOutputOptions | null;
  /**
   * A value that, if true, means that any data in the location specified for output is overwritten with new output.
   * Not supported with DatabaseOptions.
   */
  Overwrite?: boolean | null;
}
/** Serializes a DataCatalogOutput into its JSON wire shape (nulls pass through). */
function fromDataCatalogOutput(value?: DataCatalogOutput | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    CatalogId: value.CatalogId,
    DatabaseName: value.DatabaseName,
    TableName: value.TableName,
    S3Options: fromS3TableOutputOptions(value.S3Options),
    DatabaseOptions: fromDatabaseTableOutputOptions(value.DatabaseOptions),
    Overwrite: value.Overwrite,
  };
}
/** Reads a DataCatalogOutput back out of a JSON response payload. */
function toDataCatalogOutput(raw: jsonP.JSONValue): DataCatalogOutput {
  return jsonP.readObj({
    optional: {
      CatalogId: "s",
      S3Options: toS3TableOutputOptions,
      DatabaseOptions: toDatabaseTableOutputOptions,
      Overwrite: "b",
    },
    required: {
      DatabaseName: "s",
      TableName: "s",
    },
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/** Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs. */
export interface S3TableOutputOptions {
  /** Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job. */
  Location: S3Location;
}
/** Serializes an S3TableOutputOptions into its JSON wire shape (nulls pass through). */
function fromS3TableOutputOptions(value?: S3TableOutputOptions | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Location: fromS3Location(value.Location),
  };
}
/** Reads an S3TableOutputOptions back out of a JSON response payload. */
function toS3TableOutputOptions(raw: jsonP.JSONValue): S3TableOutputOptions {
  return jsonP.readObj({
    optional: {},
    required: {
      Location: toS3Location,
    },
  }, raw);
}

// refs: 12 - tags: input, named, interface, output
/** Represents options that specify how and where DataBrew writes the database output generated by recipe jobs. */
export interface DatabaseTableOutputOptions {
  /** Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results. */
  TempDirectory?: S3Location | null;
  /** A prefix for the name of a table DataBrew will create in the database. */
  TableName: string;
}
/** Serializes a DatabaseTableOutputOptions into its JSON wire shape (nulls pass through). */
function fromDatabaseTableOutputOptions(value?: DatabaseTableOutputOptions | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    TempDirectory: fromS3Location(value.TempDirectory),
    TableName: value.TableName,
  };
}
/** Reads a DatabaseTableOutputOptions back out of a JSON response payload. */
function toDatabaseTableOutputOptions(raw: jsonP.JSONValue): DatabaseTableOutputOptions {
  return jsonP.readObj({
    optional: {
      TempDirectory: toS3Location,
    },
    required: {
      TableName: "s",
    },
  }, raw);
}

// refs: 6 - tags: input, named, interface, output
/** Represents a JDBC database output object which defines the output destination for a DataBrew recipe job to write into. */
export interface DatabaseOutput {
  /** The Glue connection that stores the connection information for the target database. */
  GlueConnectionName: string;
  /** Represents options that specify how and where DataBrew writes the database output generated by recipe jobs. */
  DatabaseOptions: DatabaseTableOutputOptions;
  /**
   * The output mode to write into the database.
   * Currently supported option: NEW_TABLE.
   */
  DatabaseOutputMode?: DatabaseOutputMode | null;
}
/** Serializes a DatabaseOutput into its JSON wire shape (nulls pass through). */
function fromDatabaseOutput(value?: DatabaseOutput | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    GlueConnectionName: value.GlueConnectionName,
    DatabaseOptions: fromDatabaseTableOutputOptions(value.DatabaseOptions),
    DatabaseOutputMode: value.DatabaseOutputMode,
  };
}
/** Reads a DatabaseOutput back out of a JSON response payload. */
function toDatabaseOutput(raw: jsonP.JSONValue): DatabaseOutput {
  return jsonP.readObj({
    optional: {
      DatabaseOutputMode: (v: jsonP.JSONValue) => cmnP.readEnum<DatabaseOutputMode>(v),
    },
    required: {
      GlueConnectionName: "s",
      DatabaseOptions: toDatabaseTableOutputOptions,
    },
  }, raw);
}

// refs: 6 - tags: input, named, enum, output
/** The output mode to write into the database; currently only NEW_TABLE is supported. */
export type DatabaseOutputMode =
| "NEW_TABLE"
| cmnP.UnexpectedEnumValue;

// refs: 5 - tags: input, named, interface, output
/** Represents the name and version of a DataBrew recipe. */
export interface RecipeReference {
  /** The name of the recipe. */
  Name: string;
  /** The identifier for the version for the recipe. */
  RecipeVersion?: string | null;
}
/** Serializes a RecipeReference into its JSON wire shape (nulls pass through). */
function fromRecipeReference(value?: RecipeReference | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Name: value.Name,
    RecipeVersion: value.RecipeVersion,
  };
}
/** Reads a RecipeReference back out of a JSON response payload. */
function toRecipeReference(raw: jsonP.JSONValue): RecipeReference {
  return jsonP.readObj({
    optional: {
      RecipeVersion: "s",
    },
    required: {
      Name: "s",
    },
  }, raw);
}

// refs: 3 - tags: input, named, interface, output
/** Represents a single data quality requirement that should be validated in the scope of this dataset. */
export interface Rule {
  /** The name of the rule. */
  Name: string;
  /**
   * A value that specifies whether the rule is disabled.
   * Once a rule is disabled, a profile job will not validate it during a job run.
   * Default value is false.
   */
  Disabled?: boolean | null;
  /**
   * The expression which includes column references, condition names followed by variable references, possibly grouped and combined with other conditions.
   * For example, `(:col1 starts_with :prefix1 or :col1 starts_with :prefix2) and (:col1 ends_with :suffix1 or :col1 ends_with :suffix2)`.
   * Column and value references are substitution variables that should start with the ':' symbol.
   * Depending on the context, substitution variables' values can be either an actual value or a column name.
   * These values are defined in the SubstitutionMap.
   * If a CheckExpression starts with a column reference, then ColumnSelectors in the rule should be null.
   * If ColumnSelectors has been defined, then there should be no column reference in the left side of a condition, for example, `is_between :val1 and :val2`.
   *
   * For more information, see [Available checks](https://docs.aws.amazon.com/databrew/latest/dg/profile.data-quality-available-checks.html)
   */
  CheckExpression: string;
  /**
   * The map of substitution variable names to their values used in a check expression.
   * Variable names should start with a ':' (colon).
   * Variable values can either be actual values or column names.
   * To differentiate between the two, column names should be enclosed in backticks, for example, `":col1": "`Column A`".`
   */
  SubstitutionMap?: { [key: string]: string | null | undefined } | null;
  /**
   * The threshold used with a non-aggregate check expression.
   * Non-aggregate check expressions will be applied to each row in a specific column, and the threshold will be used to determine whether the validation succeeds.
   */
  Threshold?: Threshold | null;
  /**
   * List of column selectors.
   * Selectors can be used to select columns using a name or regular expression from the dataset.
   * Rule will be applied to selected columns.
   */
  ColumnSelectors?: ColumnSelector[] | null;
}
/** Serializes a Rule into its JSON wire shape (nulls pass through). */
function fromRule(value?: Rule | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Name: value.Name,
    Disabled: value.Disabled,
    CheckExpression: value.CheckExpression,
    SubstitutionMap: value.SubstitutionMap,
    Threshold: fromThreshold(value.Threshold),
    ColumnSelectors: value.ColumnSelectors?.map(fromColumnSelector),
  };
}
/** Reads a Rule back out of a JSON response payload. */
function toRule(raw: jsonP.JSONValue): Rule {
  return jsonP.readObj({
    optional: {
      Disabled: "b",
      SubstitutionMap: v => jsonP.readMap(String, String, v),
      Threshold: toThreshold,
      ColumnSelectors: [toColumnSelector],
    },
    required: {
      Name: "s",
      CheckExpression: "s",
    },
  }, raw);
}

// refs: 3 - tags: input, named, interface, output
/**
 * The threshold used with a non-aggregate check expression.
 * The non-aggregate check expression will be applied to each row in a specific column.
 * Then the threshold will be used to determine whether the validation succeeds.
 */
export interface Threshold {
  /** The value of a threshold. */
  Value: number;
  /**
   * The type of a threshold.
   * Used for comparison of an actual count of rows that satisfy the rule to the threshold value.
   */
  Type?: ThresholdType | null;
  /**
   * Unit of threshold value.
   * Can be either a COUNT or PERCENTAGE of the full sample size used for validation.
   */
  Unit?: ThresholdUnit | null;
}
/** Serializes a Threshold into its JSON wire shape (nulls pass through). */
function fromThreshold(value?: Threshold | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    Value: value.Value,
    Type: value.Type,
    Unit: value.Unit,
  };
}
/** Reads a Threshold back out of a JSON response payload. */
function toThreshold(raw: jsonP.JSONValue): Threshold {
  return jsonP.readObj({
    optional: {
      Type: (v: jsonP.JSONValue) => cmnP.readEnum<ThresholdType>(v),
      Unit: (v: jsonP.JSONValue) => cmnP.readEnum<ThresholdUnit>(v),
    },
    required: {
      Value: "n",
    },
  }, raw);
}

// refs: 3 - tags: input, named, enum, output
/** Comparison applied between the actual count of rows satisfying a rule and the threshold value. */
export type ThresholdType =
| "GREATER_THAN_OR_EQUAL"
| "LESS_THAN_OR_EQUAL"
| "GREATER_THAN"
| "LESS_THAN"
| cmnP.UnexpectedEnumValue;

// refs: 3 - tags: input, named, enum, output
/** Unit of a threshold value: an absolute COUNT or a PERCENTAGE of the full sample size used for validation. */
export type ThresholdUnit =
| "COUNT"
| "PERCENTAGE"
| cmnP.UnexpectedEnumValue;

// refs: 1 - tags: input, named, interface
/** Represents the data being transformed during an action. */
export interface ViewFrame {
  /** The starting index for the range of columns to return in the view frame. */
  StartColumnIndex: number;
  /** The number of columns to include in the view frame, beginning with the `StartColumnIndex` value and ignoring any columns in the `HiddenColumns` list. */
  ColumnRange?: number | null;
  /** A list of columns to hide in the view frame. */
  HiddenColumns?: string[] | null;
  /** The starting index for the range of rows to return in the view frame. */
  StartRowIndex?: number | null;
  /** The number of rows to include in the view frame, beginning with the `StartRowIndex` value. */
  RowRange?: number | null;
  /**
   * Controls if analytics computation is enabled or disabled.
   * Enabled by default.
   */
  Analytics?: AnalyticsMode | null;
}
/** Serializes a ViewFrame into its JSON wire shape (nulls pass through); input-only, so no reader exists. */
function fromViewFrame(value?: ViewFrame | null): jsonP.JSONValue {
  if (!value) return value;
  return {
    StartColumnIndex: value.StartColumnIndex,
    ColumnRange: value.ColumnRange,
    HiddenColumns: value.HiddenColumns,
    StartRowIndex: value.StartRowIndex,
    RowRange: value.RowRange,
    Analytics: value.Analytics,
  };
}

// refs: 1 - tags: input, named, enum
/** Controls whether analytics computation is enabled for a view frame. */
export type AnalyticsMode =
| "ENABLE"
| "DISABLE"
| cmnP.UnexpectedEnumValue;

// refs: 1 - tags: output, named, interface
/**
 * Represents any errors encountered when attempting to delete multiple recipe versions.
 * All fields are optional in the wire format.
 */
export interface RecipeVersionErrorDetail {
  /** The HTTP status code for the error. */
  ErrorCode?: string | null;
  /** The text of the error message. */
  ErrorMessage?: string | null;
  /** The identifier for the recipe version associated with this error. */
  RecipeVersion?: string | null;
}
/** Parses the wire-format JSON for a recipe-version error entry. */
function toRecipeVersionErrorDetail(root: jsonP.JSONValue): RecipeVersionErrorDetail {
  return jsonP.readObj({
    required: {},
    optional: {
      ErrorCode: "s",
      ErrorMessage: "s",
      RecipeVersion: "s",
    },
  }, root);
}

// refs: 2 - tags: output, named, enum
/**
 * Where a dataset's underlying data is stored.
 * Note that `"DATA-CATALOG"` is spelled with a hyphen, not an underscore.
 */
export type Source =
| "S3"
| "DATA-CATALOG"
| "DATABASE"
| cmnP.UnexpectedEnumValue;

// refs: 2 - tags: output, named, enum
/** The kind of DataBrew job: PROFILE (analyze a dataset) or RECIPE (apply transformations). */
export type JobType =
| "PROFILE"
| "RECIPE"
| cmnP.UnexpectedEnumValue;

// refs: 2 - tags: output, named, enum
/** Lifecycle states of a single job run (see `JobRun.State`). */
export type JobRunState =
| "STARTING"
| "RUNNING"
| "STOPPING"
| "STOPPED"
| "SUCCEEDED"
| "FAILED"
| "TIMEOUT"
| cmnP.UnexpectedEnumValue;

// refs: 1 - tags: output, named, enum
/** Lifecycle status values reported for a DataBrew session. */
export type SessionStatus =
| "ASSIGNED"
| "FAILED"
| "INITIALIZING"
| "PROVISIONING"
| "READY"
| "RECYCLING"
| "ROTATING"
| "TERMINATED"
| "TERMINATING"
| "UPDATING"
| cmnP.UnexpectedEnumValue;

// refs: 1 - tags: output, named, interface
/**
 * Represents a dataset that can be processed by DataBrew.
 * `Name` and `Input` are the only required fields; everything else may be absent from the service response.
 */
export interface Dataset {
  /** The ID of the Amazon Web Services account that owns the dataset. */
  AccountId?: string | null;
  /** The Amazon Resource Name (ARN) of the user who created the dataset. */
  CreatedBy?: string | null;
  /** The date and time that the dataset was created. */
  CreateDate?: Date | number | null;
  /** The unique name of the dataset. */
  Name: string;
  /** The file format of a dataset that is created from an Amazon S3 file or folder. */
  Format?: InputFormat | null;
  /** A set of options that define how DataBrew interprets the data in the dataset. */
  FormatOptions?: FormatOptions | null;
  /** Information on how DataBrew can find the dataset, in either the Glue Data Catalog or Amazon S3. */
  Input: Input;
  /** The last modification date and time of the dataset. */
  LastModifiedDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the dataset. */
  LastModifiedBy?: string | null;
  /** The location of the data for the dataset, either Amazon S3 or the Glue Data Catalog. */
  Source?: Source | null;
  /** A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset. */
  PathOptions?: PathOptions | null;
  /** Metadata tags that have been applied to the dataset. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The unique Amazon Resource Name (ARN) for the dataset. */
  ResourceArn?: string | null;
}
/** Parses the wire-format JSON for a dataset into a typed Dataset record. */
function toDataset(root: jsonP.JSONValue): Dataset {
  return jsonP.readObj({
    required: {
      Name: "s",
      Input: toInput,
    },
    optional: {
      AccountId: "s",
      CreatedBy: "s",
      CreateDate: "d",
      Format: (raw: jsonP.JSONValue) => cmnP.readEnum<InputFormat>(raw),
      FormatOptions: toFormatOptions,
      LastModifiedDate: "d",
      LastModifiedBy: "s",
      Source: (raw: jsonP.JSONValue) => cmnP.readEnum<Source>(raw),
      PathOptions: toPathOptions,
      Tags: (raw: jsonP.JSONValue) => jsonP.readMap(String, String, raw),
      ResourceArn: "s",
    },
  }, root);
}

// refs: 1 - tags: output, named, interface
/**
 * Represents one run of a DataBrew job.
 * Every field is optional in the wire format.
 */
export interface JobRun {
  /** The number of times that DataBrew has attempted to run the job. */
  Attempt?: number | null;
  /** The date and time when the job completed processing. */
  CompletedOn?: Date | number | null;
  /** The name of the dataset for the job to process. */
  DatasetName?: string | null;
  /** A message indicating an error (if any) that was encountered when the job ran. */
  ErrorMessage?: string | null;
  /** The amount of time, in seconds, during which a job run consumed resources. */
  ExecutionTime?: number | null;
  /** The name of the job being processed during this run. */
  JobName?: string | null;
  /** The unique identifier of the job run. */
  RunId?: string | null;
  /** The current state of the job run entity itself. */
  State?: JobRunState | null;
  /** The current status of Amazon CloudWatch logging for the job run. */
  LogSubscription?: LogSubscription | null;
  /** The name of an Amazon CloudWatch log group, where the job writes diagnostic messages when it runs. */
  LogGroupName?: string | null;
  /** One or more output artifacts from a job run. */
  Outputs?: Output[] | null;
  /** One or more artifacts that represent the Glue Data Catalog output from running the job. */
  DataCatalogOutputs?: DataCatalogOutput[] | null;
  /** Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into. */
  DatabaseOutputs?: DatabaseOutput[] | null;
  /** The set of steps processed by the job. */
  RecipeReference?: RecipeReference | null;
  /** The Amazon Resource Name (ARN) of the user who initiated the job run. */
  StartedBy?: string | null;
  /** The date and time when the job run began. */
  StartedOn?: Date | number | null;
  /**
   * A sample configuration for profile jobs only, which determines the number of rows on which the profile job is run.
   * If a `JobSample` value isn't provided, the default is used.
   * The default value is CUSTOM_ROWS for the mode parameter and 20,000 for the size parameter.
   */
  JobSample?: JobSample | null;
  /** List of validation configurations that are applied to the profile job run. */
  ValidationConfigurations?: ValidationConfiguration[] | null;
}
/** Parses the wire-format JSON for a single job run into a typed JobRun record. */
function toJobRun(root: jsonP.JSONValue): JobRun {
  return jsonP.readObj({
    required: {},
    optional: {
      Attempt: "n",
      CompletedOn: "d",
      DatasetName: "s",
      ErrorMessage: "s",
      ExecutionTime: "n",
      JobName: "s",
      RunId: "s",
      State: (raw: jsonP.JSONValue) => cmnP.readEnum<JobRunState>(raw),
      LogSubscription: (raw: jsonP.JSONValue) => cmnP.readEnum<LogSubscription>(raw),
      LogGroupName: "s",
      Outputs: [toOutput],
      DataCatalogOutputs: [toDataCatalogOutput],
      DatabaseOutputs: [toDatabaseOutput],
      RecipeReference: toRecipeReference,
      StartedBy: "s",
      StartedOn: "d",
      JobSample: toJobSample,
      ValidationConfigurations: [toValidationConfiguration],
    },
  }, root);
}

// refs: 1 - tags: output, named, interface
/**
 * Represents all of the attributes of a DataBrew job.
 * `Name` is the only required field; all others may be absent from the service response.
 */
export interface Job {
  /** The ID of the Amazon Web Services account that owns the job. */
  AccountId?: string | null;
  /** The Amazon Resource Name (ARN) of the user who created the job. */
  CreatedBy?: string | null;
  /** The date and time that the job was created. */
  CreateDate?: Date | number | null;
  /** A dataset that the job is to process. */
  DatasetName?: string | null;
  /**
   * The Amazon Resource Name (ARN) of an encryption key that is used to protect the job output.
   * For more information, see [Encrypting data written by DataBrew jobs](https://docs.aws.amazon.com/databrew/latest/dg/encryption-security-configuration.html)
   */
  EncryptionKeyArn?: string | null;
  /**
   * The encryption mode for the job, which can be one of the following:
   *
   *   - `SSE-KMS` - Server-side encryption with keys managed by KMS.
   *   - `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
   */
  EncryptionMode?: EncryptionMode | null;
  /** The unique name of the job. */
  Name: string;
  /**
   * The job type of the job, which must be one of the following:
   *
   *   - `PROFILE` - A job to analyze a dataset, to determine its size, data types, data distribution, and more.
   *   - `RECIPE` - A job to apply one or more transformations to a dataset.
   */
  Type?: JobType | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the job. */
  LastModifiedBy?: string | null;
  /** The modification date and time of the job. */
  LastModifiedDate?: Date | number | null;
  /** The current status of Amazon CloudWatch logging for the job. */
  LogSubscription?: LogSubscription | null;
  /** The maximum number of nodes that can be consumed when the job processes data. */
  MaxCapacity?: number | null;
  /** The maximum number of times to retry the job after a job run fails. */
  MaxRetries?: number | null;
  /** One or more artifacts that represent output from running the job. */
  Outputs?: Output[] | null;
  /** One or more artifacts that represent the Glue Data Catalog output from running the job. */
  DataCatalogOutputs?: DataCatalogOutput[] | null;
  /** Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into. */
  DatabaseOutputs?: DatabaseOutput[] | null;
  /** The name of the project that the job is associated with. */
  ProjectName?: string | null;
  /** A set of steps that the job runs. */
  RecipeReference?: RecipeReference | null;
  /** The unique Amazon Resource Name (ARN) for the job. */
  ResourceArn?: string | null;
  /** The Amazon Resource Name (ARN) of the role to be assumed for this job. */
  RoleArn?: string | null;
  /**
   * The job's timeout in minutes.
   * A job that attempts to run longer than this timeout period ends with a status of `TIMEOUT`.
   */
  Timeout?: number | null;
  /** Metadata tags that have been applied to the job. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * A sample configuration for profile jobs only, which determines the number of rows on which the profile job is run.
   * If a `JobSample` value isn't provided, the default value is used.
   * The default value is CUSTOM_ROWS for the mode parameter and 20,000 for the size parameter.
   */
  JobSample?: JobSample | null;
  /** List of validation configurations that are applied to the profile job. */
  ValidationConfigurations?: ValidationConfiguration[] | null;
}
/** Parses the wire-format JSON for a job definition into a typed Job record. */
function toJob(root: jsonP.JSONValue): Job {
  return jsonP.readObj({
    required: {
      Name: "s",
    },
    optional: {
      AccountId: "s",
      CreatedBy: "s",
      CreateDate: "d",
      DatasetName: "s",
      EncryptionKeyArn: "s",
      EncryptionMode: (raw: jsonP.JSONValue) => cmnP.readEnum<EncryptionMode>(raw),
      Type: (raw: jsonP.JSONValue) => cmnP.readEnum<JobType>(raw),
      LastModifiedBy: "s",
      LastModifiedDate: "d",
      LogSubscription: (raw: jsonP.JSONValue) => cmnP.readEnum<LogSubscription>(raw),
      MaxCapacity: "n",
      MaxRetries: "n",
      Outputs: [toOutput],
      DataCatalogOutputs: [toDataCatalogOutput],
      DatabaseOutputs: [toDatabaseOutput],
      ProjectName: "s",
      RecipeReference: toRecipeReference,
      ResourceArn: "s",
      RoleArn: "s",
      Timeout: "n",
      Tags: (raw: jsonP.JSONValue) => jsonP.readMap(String, String, raw),
      JobSample: toJobSample,
      ValidationConfigurations: [toValidationConfiguration],
    },
  }, root);
}

// refs: 1 - tags: output, named, interface
/**
 * Represents all of the attributes of a DataBrew project.
 * `Name` and `RecipeName` are the only required fields.
 */
export interface Project {
  /** The ID of the Amazon Web Services account that owns the project. */
  AccountId?: string | null;
  /** The date and time that the project was created. */
  CreateDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the user who created the project. */
  CreatedBy?: string | null;
  /** The dataset that the project is to act upon. */
  DatasetName?: string | null;
  /** The last modification date and time for the project. */
  LastModifiedDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the project. */
  LastModifiedBy?: string | null;
  /** The unique name of a project. */
  Name: string;
  /** The name of a recipe that will be developed during a project session. */
  RecipeName: string;
  /** The Amazon Resource Name (ARN) for the project. */
  ResourceArn?: string | null;
  /**
   * The sample size and sampling type to apply to the data.
   * If this parameter isn't specified, then the sample consists of the first 500 rows from the dataset.
   */
  Sample?: Sample | null;
  /** Metadata tags that have been applied to the project. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The Amazon Resource Name (ARN) of the role that will be assumed for this project. */
  RoleArn?: string | null;
  /** The Amazon Resource Name (ARN) of the user that opened the project for use. */
  OpenedBy?: string | null;
  /** The date and time when the project was opened. */
  OpenDate?: Date | number | null;
}
/** Parses the wire-format JSON for a project into a typed Project record. */
function toProject(root: jsonP.JSONValue): Project {
  return jsonP.readObj({
    required: {
      Name: "s",
      RecipeName: "s",
    },
    optional: {
      AccountId: "s",
      CreateDate: "d",
      CreatedBy: "s",
      DatasetName: "s",
      LastModifiedDate: "d",
      LastModifiedBy: "s",
      ResourceArn: "s",
      Sample: toSample,
      Tags: (raw: jsonP.JSONValue) => jsonP.readMap(String, String, raw),
      RoleArn: "s",
      OpenedBy: "s",
      OpenDate: "d",
    },
  }, root);
}

// refs: 2 - tags: output, named, interface
/**
 * Represents one or more actions to be performed on a DataBrew dataset.
 * `Name` is the only required field.
 */
export interface Recipe {
  /** The Amazon Resource Name (ARN) of the user who created the recipe. */
  CreatedBy?: string | null;
  /** The date and time that the recipe was created. */
  CreateDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the recipe. */
  LastModifiedBy?: string | null;
  /** The last modification date and time of the recipe. */
  LastModifiedDate?: Date | number | null;
  /** The name of the project that the recipe is associated with. */
  ProjectName?: string | null;
  /** The Amazon Resource Name (ARN) of the user who published the recipe. */
  PublishedBy?: string | null;
  /** The date and time when the recipe was published. */
  PublishedDate?: Date | number | null;
  /** The description of the recipe. */
  Description?: string | null;
  /** The unique name for the recipe. */
  Name: string;
  /** The Amazon Resource Name (ARN) for the recipe. */
  ResourceArn?: string | null;
  /** A list of steps that are defined by the recipe. */
  Steps?: RecipeStep[] | null;
  /** Metadata tags that have been applied to the recipe. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /**
   * The identifier for the version for the recipe.
   * Must be one of the following:
   *
   *   - Numeric version (`X.Y`) - `X` and `Y` stand for major and minor version numbers.
   *     The maximum length of each is 6 digits, and neither can be negative values.
   *     Both `X` and `Y` are required, and "0.0" isn't a valid version.
   *   - `LATEST_WORKING` - the most recent valid version being developed in a DataBrew project.
   *   - `LATEST_PUBLISHED` - the most recent published version.
   */
  RecipeVersion?: string | null;
}
/** Parses the wire-format JSON for a recipe into a typed Recipe record. */
function toRecipe(root: jsonP.JSONValue): Recipe {
  return jsonP.readObj({
    required: {
      Name: "s",
    },
    optional: {
      CreatedBy: "s",
      CreateDate: "d",
      LastModifiedBy: "s",
      LastModifiedDate: "d",
      ProjectName: "s",
      PublishedBy: "s",
      PublishedDate: "d",
      Description: "s",
      ResourceArn: "s",
      Steps: [toRecipeStep],
      Tags: (raw: jsonP.JSONValue) => jsonP.readMap(String, String, raw),
      RecipeVersion: "s",
    },
  }, root);
}

// refs: 1 - tags: output, named, interface
/**
 * Contains metadata about the ruleset.
 * `Name` and `TargetArn` are the only required fields.
 */
export interface RulesetItem {
  /** The ID of the Amazon Web Services account that owns the ruleset. */
  AccountId?: string | null;
  /** The Amazon Resource Name (ARN) of the user who created the ruleset. */
  CreatedBy?: string | null;
  /** The date and time that the ruleset was created. */
  CreateDate?: Date | number | null;
  /** The description of the ruleset. */
  Description?: string | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the ruleset. */
  LastModifiedBy?: string | null;
  /** The modification date and time of the ruleset. */
  LastModifiedDate?: Date | number | null;
  /** The name of the ruleset. */
  Name: string;
  /** The Amazon Resource Name (ARN) for the ruleset. */
  ResourceArn?: string | null;
  /** The number of rules that are defined in the ruleset. */
  RuleCount?: number | null;
  /** Metadata tags that have been applied to the ruleset. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The Amazon Resource Name (ARN) of a resource (dataset) that the ruleset is associated with. */
  TargetArn: string;
}
/** Parses the wire-format JSON for a ruleset summary into a typed RulesetItem record. */
function toRulesetItem(root: jsonP.JSONValue): RulesetItem {
  return jsonP.readObj({
    required: {
      Name: "s",
      TargetArn: "s",
    },
    optional: {
      AccountId: "s",
      CreatedBy: "s",
      CreateDate: "d",
      Description: "s",
      LastModifiedBy: "s",
      LastModifiedDate: "d",
      ResourceArn: "s",
      RuleCount: "n",
      Tags: (raw: jsonP.JSONValue) => jsonP.readMap(String, String, raw),
    },
  }, root);
}

// refs: 1 - tags: output, named, interface
/**
 * Represents one or more dates and times when a job is to run.
 * `Name` (declared last) is the only required field.
 */
export interface Schedule {
  /** The ID of the Amazon Web Services account that owns the schedule. */
  AccountId?: string | null;
  /** The Amazon Resource Name (ARN) of the user who created the schedule. */
  CreatedBy?: string | null;
  /** The date and time that the schedule was created. */
  CreateDate?: Date | number | null;
  /** A list of jobs to be run, according to the schedule. */
  JobNames?: string[] | null;
  /** The Amazon Resource Name (ARN) of the user who last modified the schedule. */
  LastModifiedBy?: string | null;
  /** The date and time when the schedule was last modified. */
  LastModifiedDate?: Date | number | null;
  /** The Amazon Resource Name (ARN) of the schedule. */
  ResourceArn?: string | null;
  /**
   * The dates and times when the job is to run.
   * For more information, see [Cron expressions](https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html) in the _Glue DataBrew Developer Guide_.
   */
  CronExpression?: string | null;
  /** Metadata tags that have been applied to the schedule. */
  Tags?: { [key: string]: string | null | undefined } | null;
  /** The name of the schedule. */
  Name: string;
}
/** Parses the wire-format JSON for a schedule into a typed Schedule record. */
function toSchedule(root: jsonP.JSONValue): Schedule {
  return jsonP.readObj({
    required: {
      Name: "s",
    },
    optional: {
      AccountId: "s",
      CreatedBy: "s",
      CreateDate: "d",
      JobNames: ["s"],
      LastModifiedBy: "s",
      LastModifiedDate: "d",
      ResourceArn: "s",
      CronExpression: "s",
      Tags: (raw: jsonP.JSONValue) => jsonP.readMap(String, String, raw),
    },
  }, root);
}