datarobot.BatchPredictionJobDefinition
Batch Prediction Job Definition
Example Usage
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.datarobot.BatchPredictionJobDefinition;
import com.pulumi.datarobot.BatchPredictionJobDefinitionArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionIntakeSettingsArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionOutputSettingsArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionCsvSettingsArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionScheduleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var example = new BatchPredictionJobDefinition("example", BatchPredictionJobDefinitionArgs.builder()
            .deploymentId(datarobot_deployment.batch_prediction_job_definition().id())
            .intakeSettings(BatchPredictionJobDefinitionIntakeSettingsArgs.builder()
                .type("s3")
                .url("s3://datarobot-public-datasets-redistributable/1k_diabetes_simplified_features.csv")
                .credentialId(datarobot_basic_credential.batch_prediction_job_definition().id())
                .build())
            .outputSettings(BatchPredictionJobDefinitionOutputSettingsArgs.builder()
                .type("s3")
                .url("s3://my-test-bucket/predictions.csv")
                .credentialId(datarobot_basic_credential.batch_prediction_job_definition().id())
                .build())
            .csvSettings(BatchPredictionJobDefinitionCsvSettingsArgs.builder()
                .delimiter(".")
                .quotechar("'")
                .encoding("utf-8")
                .build())
            .numConcurrent(1)
            .chunkSize(10)
            .maxExplanations(5)
            .thresholdHigh(0.8)
            .thresholdLow(0.2)
            .predictionThreshold(0.5)
            .includePredictionStatus(true)
            .skipDriftTracking(true)
            .passthroughColumnsSet("all")
            .abortOnError(false)
            .includeProbabilities(true)
            .columnNamesRemapping(Map.of("col1", "newCol1"))
            .schedule(BatchPredictionJobDefinitionScheduleArgs.builder()
                .minutes("15", "45")
                .hours("*")
                .months("*")
                .dayOfMonths("*")
                .dayOfWeeks("*")
                .build())
            .build());
        ctx.export("exampleId", example.id());
    }
}
resources:
  example:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.batch_prediction_job_definition.id}
      intakeSettings:
        type: s3
        url: s3://datarobot-public-datasets-redistributable/1k_diabetes_simplified_features.csv
        credentialId: ${datarobot_basic_credential.batch_prediction_job_definition.id}
      # Optional parameters
      outputSettings:
        type: s3
        url: s3://my-test-bucket/predictions.csv
        credentialId: ${datarobot_basic_credential.batch_prediction_job_definition.id}
      csvSettings:
        delimiter: .
        quotechar: ''''
        encoding: utf-8
      numConcurrent: 1
      chunkSize: 10
      maxExplanations: 5
      thresholdHigh: 0.8
      thresholdLow: 0.2
      predictionThreshold: 0.5
      includePredictionStatus: true
      skipDriftTracking: true
      passthroughColumnsSet: all
      abortOnError: false
      includeProbabilities: true
      columnNamesRemapping:
        col1: newCol1
      schedule:
        minutes:
          - '15'
          - '45'
        hours:
          - '*'
        months:
          - '*'
        dayOfMonths:
          - '*'
        dayOfWeeks:
          - '*'
outputs:
  exampleId: ${example.id}
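For reference, the same job definition sketched in Python. This is not part of the provider's published examples; it assumes a deployment and a basic credential resource are defined elsewhere in the program (here named deployment and credential) and that the provider package is imported as pulumi_datarobot.
import pulumi
import pulumi_datarobot as datarobot

# Assumes `deployment` (datarobot.Deployment) and `credential`
# (datarobot.BasicCredential) are defined elsewhere in this program.
example = datarobot.BatchPredictionJobDefinition(
    "example",
    deployment_id=deployment.id,
    intake_settings={
        "type": "s3",
        "url": "s3://datarobot-public-datasets-redistributable/1k_diabetes_simplified_features.csv",
        "credential_id": credential.id,
    },
    output_settings={
        "type": "s3",
        "url": "s3://my-test-bucket/predictions.csv",
        "credential_id": credential.id,
    },
    csv_settings={
        "delimiter": ".",
        "quotechar": "'",
        "encoding": "utf-8",
    },
    num_concurrent=1,
    chunk_size=10,
    max_explanations=5,
    threshold_high=0.8,
    threshold_low=0.2,
    prediction_threshold=0.5,
    include_prediction_status=True,
    skip_drift_tracking=True,
    passthrough_columns_set="all",
    abort_on_error=False,
    include_probabilities=True,
    column_names_remapping={"col1": "newCol1"},
    schedule={
        "minutes": ["15", "45"],
        "hours": ["*"],
        "months": ["*"],
        "day_of_months": ["*"],
        "day_of_weeks": ["*"],
    },
)
pulumi.export("exampleId", example.id)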
Create BatchPredictionJobDefinition Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new BatchPredictionJobDefinition(name: string, args: BatchPredictionJobDefinitionArgs, opts?: CustomResourceOptions);
@overload
def BatchPredictionJobDefinition(resource_name: str,
                                 args: BatchPredictionJobDefinitionArgs,
                                 opts: Optional[ResourceOptions] = None)
@overload
def BatchPredictionJobDefinition(resource_name: str,
                                 opts: Optional[ResourceOptions] = None,
                                 deployment_id: Optional[str] = None,
                                 intake_settings: Optional[BatchPredictionJobDefinitionIntakeSettingsArgs] = None,
                                 name: Optional[str] = None,
                                 timeseries_settings: Optional[BatchPredictionJobDefinitionTimeseriesSettingsArgs] = None,
                                 abort_on_error: Optional[bool] = None,
                                 enabled: Optional[bool] = None,
                                 explanation_algorithm: Optional[str] = None,
                                 include_prediction_status: Optional[bool] = None,
                                 include_probabilities: Optional[bool] = None,
                                 include_probabilities_classes: Optional[Sequence[str]] = None,
                                 chunk_size: Optional[Any] = None,
                                 output_settings: Optional[BatchPredictionJobDefinitionOutputSettingsArgs] = None,
                                 column_names_remapping: Optional[Mapping[str, str]] = None,
                                 csv_settings: Optional[BatchPredictionJobDefinitionCsvSettingsArgs] = None,
                                 max_explanations: Optional[int] = None,
                                 passthrough_columns: Optional[Sequence[str]] = None,
                                 passthrough_columns_set: Optional[str] = None,
                                 prediction_instance: Optional[BatchPredictionJobDefinitionPredictionInstanceArgs] = None,
                                 prediction_threshold: Optional[float] = None,
                                 prediction_warning_enabled: Optional[bool] = None,
                                 schedule: Optional[BatchPredictionJobDefinitionScheduleArgs] = None,
                                 skip_drift_tracking: Optional[bool] = None,
                                 threshold_high: Optional[float] = None,
                                 threshold_low: Optional[float] = None,
                                 num_concurrent: Optional[int] = None)
func NewBatchPredictionJobDefinition(ctx *Context, name string, args BatchPredictionJobDefinitionArgs, opts ...ResourceOption) (*BatchPredictionJobDefinition, error)
public BatchPredictionJobDefinition(string name, BatchPredictionJobDefinitionArgs args, CustomResourceOptions? opts = null)
public BatchPredictionJobDefinition(String name, BatchPredictionJobDefinitionArgs args)
public BatchPredictionJobDefinition(String name, BatchPredictionJobDefinitionArgs args, CustomResourceOptions options)
type: datarobot:BatchPredictionJobDefinition
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var batchPredictionJobDefinitionResource = new Datarobot.BatchPredictionJobDefinition("batchPredictionJobDefinitionResource", new()
{
    DeploymentId = "string",
    IntakeSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionIntakeSettingsArgs
    {
        Type = "string",
        Catalog = "string",
        CredentialId = "string",
        DataStoreId = "string",
        DatasetId = "string",
        EndpointUrl = "string",
        FetchSize = 0,
        File = "string",
        Query = "string",
        Schema = "string",
        Table = "string",
        Url = "string",
    },
    Name = "string",
    TimeseriesSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionTimeseriesSettingsArgs
    {
        ForecastPoint = "string",
        PredictionsEndDate = "string",
        PredictionsStartDate = "string",
        RelaxKnownInAdvanceFeaturesCheck = false,
        Type = "string",
    },
    AbortOnError = false,
    Enabled = false,
    ExplanationAlgorithm = "string",
    IncludePredictionStatus = false,
    IncludeProbabilities = false,
    IncludeProbabilitiesClasses = new[]
    {
        "string",
    },
    ChunkSize = "any",
    OutputSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionOutputSettingsArgs
    {
        Catalog = "string",
        CreateTableIfNotExists = false,
        CredentialId = "string",
        DataStoreId = "string",
        EndpointUrl = "string",
        Path = "string",
        Schema = "string",
        StatementType = "string",
        Table = "string",
        Type = "string",
        UpdateColumns = new[]
        {
            "string",
        },
        Url = "string",
        WhereColumns = new[]
        {
            "string",
        },
    },
    ColumnNamesRemapping = 
    {
        { "string", "string" },
    },
    CsvSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionCsvSettingsArgs
    {
        Delimiter = "string",
        Encoding = "string",
        Quotechar = "string",
    },
    MaxExplanations = 0,
    PassthroughColumns = new[]
    {
        "string",
    },
    PassthroughColumnsSet = "string",
    PredictionInstance = new Datarobot.Inputs.BatchPredictionJobDefinitionPredictionInstanceArgs
    {
        HostName = "string",
        ApiKey = "string",
        DatarobotKey = "string",
        SslEnabled = false,
    },
    PredictionThreshold = 0,
    PredictionWarningEnabled = false,
    Schedule = new Datarobot.Inputs.BatchPredictionJobDefinitionScheduleArgs
    {
        DayOfMonths = new[]
        {
            "string",
        },
        DayOfWeeks = new[]
        {
            "string",
        },
        Hours = new[]
        {
            "string",
        },
        Minutes = new[]
        {
            "string",
        },
        Months = new[]
        {
            "string",
        },
    },
    SkipDriftTracking = false,
    ThresholdHigh = 0,
    ThresholdLow = 0,
    NumConcurrent = 0,
});
example, err := datarobot.NewBatchPredictionJobDefinition(ctx, "batchPredictionJobDefinitionResource", &datarobot.BatchPredictionJobDefinitionArgs{
	DeploymentId: pulumi.String("string"),
	IntakeSettings: &datarobot.BatchPredictionJobDefinitionIntakeSettingsArgs{
		Type:         pulumi.String("string"),
		Catalog:      pulumi.String("string"),
		CredentialId: pulumi.String("string"),
		DataStoreId:  pulumi.String("string"),
		DatasetId:    pulumi.String("string"),
		EndpointUrl:  pulumi.String("string"),
		FetchSize:    pulumi.Int(0),
		File:         pulumi.String("string"),
		Query:        pulumi.String("string"),
		Schema:       pulumi.String("string"),
		Table:        pulumi.String("string"),
		Url:          pulumi.String("string"),
	},
	Name: pulumi.String("string"),
	TimeseriesSettings: &datarobot.BatchPredictionJobDefinitionTimeseriesSettingsArgs{
		ForecastPoint:                    pulumi.String("string"),
		PredictionsEndDate:               pulumi.String("string"),
		PredictionsStartDate:             pulumi.String("string"),
		RelaxKnownInAdvanceFeaturesCheck: pulumi.Bool(false),
		Type:                             pulumi.String("string"),
	},
	AbortOnError:            pulumi.Bool(false),
	Enabled:                 pulumi.Bool(false),
	ExplanationAlgorithm:    pulumi.String("string"),
	IncludePredictionStatus: pulumi.Bool(false),
	IncludeProbabilities:    pulumi.Bool(false),
	IncludeProbabilitiesClasses: pulumi.StringArray{
		pulumi.String("string"),
	},
	ChunkSize: pulumi.Any("any"),
	OutputSettings: &datarobot.BatchPredictionJobDefinitionOutputSettingsArgs{
		Catalog:                pulumi.String("string"),
		CreateTableIfNotExists: pulumi.Bool(false),
		CredentialId:           pulumi.String("string"),
		DataStoreId:            pulumi.String("string"),
		EndpointUrl:            pulumi.String("string"),
		Path:                   pulumi.String("string"),
		Schema:                 pulumi.String("string"),
		StatementType:          pulumi.String("string"),
		Table:                  pulumi.String("string"),
		Type:                   pulumi.String("string"),
		UpdateColumns: pulumi.StringArray{
			pulumi.String("string"),
		},
		Url: pulumi.String("string"),
		WhereColumns: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	ColumnNamesRemapping: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	CsvSettings: &datarobot.BatchPredictionJobDefinitionCsvSettingsArgs{
		Delimiter: pulumi.String("string"),
		Encoding:  pulumi.String("string"),
		Quotechar: pulumi.String("string"),
	},
	MaxExplanations: pulumi.Int(0),
	PassthroughColumns: pulumi.StringArray{
		pulumi.String("string"),
	},
	PassthroughColumnsSet: pulumi.String("string"),
	PredictionInstance: &datarobot.BatchPredictionJobDefinitionPredictionInstanceArgs{
		HostName:     pulumi.String("string"),
		ApiKey:       pulumi.String("string"),
		DatarobotKey: pulumi.String("string"),
		SslEnabled:   pulumi.Bool(false),
	},
	PredictionThreshold:      pulumi.Float64(0),
	PredictionWarningEnabled: pulumi.Bool(false),
	Schedule: &datarobot.BatchPredictionJobDefinitionScheduleArgs{
		DayOfMonths: pulumi.StringArray{
			pulumi.String("string"),
		},
		DayOfWeeks: pulumi.StringArray{
			pulumi.String("string"),
		},
		Hours: pulumi.StringArray{
			pulumi.String("string"),
		},
		Minutes: pulumi.StringArray{
			pulumi.String("string"),
		},
		Months: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	SkipDriftTracking: pulumi.Bool(false),
	ThresholdHigh:     pulumi.Float64(0),
	ThresholdLow:      pulumi.Float64(0),
	NumConcurrent:     pulumi.Int(0),
})
var batchPredictionJobDefinitionResource = new BatchPredictionJobDefinition("batchPredictionJobDefinitionResource", BatchPredictionJobDefinitionArgs.builder()
    .deploymentId("string")
    .intakeSettings(BatchPredictionJobDefinitionIntakeSettingsArgs.builder()
        .type("string")
        .catalog("string")
        .credentialId("string")
        .dataStoreId("string")
        .datasetId("string")
        .endpointUrl("string")
        .fetchSize(0)
        .file("string")
        .query("string")
        .schema("string")
        .table("string")
        .url("string")
        .build())
    .name("string")
    .timeseriesSettings(BatchPredictionJobDefinitionTimeseriesSettingsArgs.builder()
        .forecastPoint("string")
        .predictionsEndDate("string")
        .predictionsStartDate("string")
        .relaxKnownInAdvanceFeaturesCheck(false)
        .type("string")
        .build())
    .abortOnError(false)
    .enabled(false)
    .explanationAlgorithm("string")
    .includePredictionStatus(false)
    .includeProbabilities(false)
    .includeProbabilitiesClasses("string")
    .chunkSize("any")
    .outputSettings(BatchPredictionJobDefinitionOutputSettingsArgs.builder()
        .catalog("string")
        .createTableIfNotExists(false)
        .credentialId("string")
        .dataStoreId("string")
        .endpointUrl("string")
        .path("string")
        .schema("string")
        .statementType("string")
        .table("string")
        .type("string")
        .updateColumns("string")
        .url("string")
        .whereColumns("string")
        .build())
    .columnNamesRemapping(Map.of("string", "string"))
    .csvSettings(BatchPredictionJobDefinitionCsvSettingsArgs.builder()
        .delimiter("string")
        .encoding("string")
        .quotechar("string")
        .build())
    .maxExplanations(0)
    .passthroughColumns("string")
    .passthroughColumnsSet("string")
    .predictionInstance(BatchPredictionJobDefinitionPredictionInstanceArgs.builder()
        .hostName("string")
        .apiKey("string")
        .datarobotKey("string")
        .sslEnabled(false)
        .build())
    .predictionThreshold(0)
    .predictionWarningEnabled(false)
    .schedule(BatchPredictionJobDefinitionScheduleArgs.builder()
        .dayOfMonths("string")
        .dayOfWeeks("string")
        .hours("string")
        .minutes("string")
        .months("string")
        .build())
    .skipDriftTracking(false)
    .thresholdHigh(0)
    .thresholdLow(0)
    .numConcurrent(0)
    .build());
batch_prediction_job_definition_resource = datarobot.BatchPredictionJobDefinition("batchPredictionJobDefinitionResource",
    deployment_id="string",
    intake_settings={
        "type": "string",
        "catalog": "string",
        "credential_id": "string",
        "data_store_id": "string",
        "dataset_id": "string",
        "endpoint_url": "string",
        "fetch_size": 0,
        "file": "string",
        "query": "string",
        "schema": "string",
        "table": "string",
        "url": "string",
    },
    name="string",
    timeseries_settings={
        "forecast_point": "string",
        "predictions_end_date": "string",
        "predictions_start_date": "string",
        "relax_known_in_advance_features_check": False,
        "type": "string",
    },
    abort_on_error=False,
    enabled=False,
    explanation_algorithm="string",
    include_prediction_status=False,
    include_probabilities=False,
    include_probabilities_classes=["string"],
    chunk_size="any",
    output_settings={
        "catalog": "string",
        "create_table_if_not_exists": False,
        "credential_id": "string",
        "data_store_id": "string",
        "endpoint_url": "string",
        "path": "string",
        "schema": "string",
        "statement_type": "string",
        "table": "string",
        "type": "string",
        "update_columns": ["string"],
        "url": "string",
        "where_columns": ["string"],
    },
    column_names_remapping={
        "string": "string",
    },
    csv_settings={
        "delimiter": "string",
        "encoding": "string",
        "quotechar": "string",
    },
    max_explanations=0,
    passthrough_columns=["string"],
    passthrough_columns_set="string",
    prediction_instance={
        "host_name": "string",
        "api_key": "string",
        "datarobot_key": "string",
        "ssl_enabled": False,
    },
    prediction_threshold=0,
    prediction_warning_enabled=False,
    schedule={
        "day_of_months": ["string"],
        "day_of_weeks": ["string"],
        "hours": ["string"],
        "minutes": ["string"],
        "months": ["string"],
    },
    skip_drift_tracking=False,
    threshold_high=0,
    threshold_low=0,
    num_concurrent=0)
const batchPredictionJobDefinitionResource = new datarobot.BatchPredictionJobDefinition("batchPredictionJobDefinitionResource", {
    deploymentId: "string",
    intakeSettings: {
        type: "string",
        catalog: "string",
        credentialId: "string",
        dataStoreId: "string",
        datasetId: "string",
        endpointUrl: "string",
        fetchSize: 0,
        file: "string",
        query: "string",
        schema: "string",
        table: "string",
        url: "string",
    },
    name: "string",
    timeseriesSettings: {
        forecastPoint: "string",
        predictionsEndDate: "string",
        predictionsStartDate: "string",
        relaxKnownInAdvanceFeaturesCheck: false,
        type: "string",
    },
    abortOnError: false,
    enabled: false,
    explanationAlgorithm: "string",
    includePredictionStatus: false,
    includeProbabilities: false,
    includeProbabilitiesClasses: ["string"],
    chunkSize: "any",
    outputSettings: {
        catalog: "string",
        createTableIfNotExists: false,
        credentialId: "string",
        dataStoreId: "string",
        endpointUrl: "string",
        path: "string",
        schema: "string",
        statementType: "string",
        table: "string",
        type: "string",
        updateColumns: ["string"],
        url: "string",
        whereColumns: ["string"],
    },
    columnNamesRemapping: {
        string: "string",
    },
    csvSettings: {
        delimiter: "string",
        encoding: "string",
        quotechar: "string",
    },
    maxExplanations: 0,
    passthroughColumns: ["string"],
    passthroughColumnsSet: "string",
    predictionInstance: {
        hostName: "string",
        apiKey: "string",
        datarobotKey: "string",
        sslEnabled: false,
    },
    predictionThreshold: 0,
    predictionWarningEnabled: false,
    schedule: {
        dayOfMonths: ["string"],
        dayOfWeeks: ["string"],
        hours: ["string"],
        minutes: ["string"],
        months: ["string"],
    },
    skipDriftTracking: false,
    thresholdHigh: 0,
    thresholdLow: 0,
    numConcurrent: 0,
});
type: datarobot:BatchPredictionJobDefinition
properties:
    abortOnError: false
    chunkSize: any
    columnNamesRemapping:
        string: string
    csvSettings:
        delimiter: string
        encoding: string
        quotechar: string
    deploymentId: string
    enabled: false
    explanationAlgorithm: string
    includePredictionStatus: false
    includeProbabilities: false
    includeProbabilitiesClasses:
        - string
    intakeSettings:
        catalog: string
        credentialId: string
        dataStoreId: string
        datasetId: string
        endpointUrl: string
        fetchSize: 0
        file: string
        query: string
        schema: string
        table: string
        type: string
        url: string
    maxExplanations: 0
    name: string
    numConcurrent: 0
    outputSettings:
        catalog: string
        createTableIfNotExists: false
        credentialId: string
        dataStoreId: string
        endpointUrl: string
        path: string
        schema: string
        statementType: string
        table: string
        type: string
        updateColumns:
            - string
        url: string
        whereColumns:
            - string
    passthroughColumns:
        - string
    passthroughColumnsSet: string
    predictionInstance:
        apiKey: string
        datarobotKey: string
        hostName: string
        sslEnabled: false
    predictionThreshold: 0
    predictionWarningEnabled: false
    schedule:
        dayOfMonths:
            - string
        dayOfWeeks:
            - string
        hours:
            - string
        minutes:
            - string
        months:
            - string
    skipDriftTracking: false
    thresholdHigh: 0
    thresholdLow: 0
    timeseriesSettings:
        forecastPoint: string
        predictionsEndDate: string
        predictionsStartDate: string
        relaxKnownInAdvanceFeaturesCheck: false
        type: string
BatchPredictionJobDefinition Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
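For example, a minimal sketch of both forms for intake_settings (placeholder IDs; the args class name is taken from the constructor overloads above, and the provider package is assumed to be imported as datarobot):
import pulumi_datarobot as datarobot

# Typed args class form:
job_a = datarobot.BatchPredictionJobDefinition(
    "job-with-args-class",
    deployment_id="<deployment id>",
    intake_settings=datarobot.BatchPredictionJobDefinitionIntakeSettingsArgs(
        type="s3",
        url="s3://my-bucket/scoring.csv",
        credential_id="<credential id>",
    ),
)

# Equivalent dictionary-literal form:
job_b = datarobot.BatchPredictionJobDefinition(
    "job-with-dict",
    deployment_id="<deployment id>",
    intake_settings={
        "type": "s3",
        "url": "s3://my-bucket/scoring.csv",
        "credential_id": "<credential id>",
    },
)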
The BatchPredictionJobDefinition resource accepts the following input properties:
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- IntakeSettings DataRobot Batch Prediction Job Definition Intake Settings 
- A dict configuring how data is coming from.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- ChunkSize object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping Dictionary<string, string>
- Mapping with column renaming for output table.
- CsvSettings DataRobot Batch Prediction Job Definition Csv Settings 
- CSV intake and output settings.
- Enabled bool
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning of all probability columns. Defaults to True.
- IncludeProbabilitiesClasses List<string>
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- MaxExplanations int
- Compute prediction explanations for this amount of features.
- Name string
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- NumConcurrent int
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings DataRobot Batch Prediction Job Definition Output Settings 
- A dict configuring how scored data is to be saved.
- PassthroughColumns List<string>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance DataRobot Batch Prediction Job Definition Prediction Instance 
- Defaults to instance specified by deployment or system configuration.
- PredictionThreshold double
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule DataRobot Batch Prediction Job Definition Schedule
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- ThresholdHigh double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings DataRobot Batch Prediction Job Definition Timeseries Settings 
- Configuration for time-series scoring.
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- IntakeSettings BatchPrediction Job Definition Intake Settings Args 
- A dict configuring how data is coming from.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- ChunkSize interface{}
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping map[string]string
- Mapping with column renaming for output table.
- CsvSettings BatchPrediction Job Definition Csv Settings Args 
- CSV intake and output settings.
- Enabled bool
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning of all probability columns. Defaults to True.
- IncludeProbabilitiesClasses []string
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- MaxExplanations int
- Compute prediction explanations for this amount of features.
- Name string
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- NumConcurrent int
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings BatchPrediction Job Definition Output Settings Args 
- A dict configuring how scored data is to be saved.
- PassthroughColumns []string
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance BatchPrediction Job Definition Prediction Instance Args 
- Defaults to instance specified by deployment or system configuration.
- PredictionThreshold float64
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule BatchPrediction Job Definition Schedule Args
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- ThresholdHigh float64
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow float64
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings BatchPrediction Job Definition Timeseries Settings Args 
- Configuration for time-series scoring.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- intakeSettings BatchPrediction Job Definition Intake Settings 
- A dict configuring how data is coming from.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunkSize Object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String,String>
- Mapping with column renaming for output table.
- csvSettings BatchPrediction Job Definition Csv Settings 
- CSV intake and output settings.
- enabled Boolean
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning of all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- maxExplanations Integer
- Compute prediction explanations for this amount of features.
- name String
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- numConcurrent Integer
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPrediction Job Definition Output Settings 
- A dict configuring how scored data is to be saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPrediction Job Definition Prediction Instance 
- Defaults to instance specified by deployment or system configuration.
- predictionThreshold Double
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPrediction Job Definition Schedule
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- thresholdHigh Double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPrediction Job Definition Timeseries Settings 
- Configuration for time-series scoring.
- deploymentId string
- The ID of the deployment to use for the batch prediction job.
- intakeSettings BatchPrediction Job Definition Intake Settings 
- A dict configuring how data is coming from.
- abortOnError boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunkSize any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping {[key: string]: string}
- Mapping with column renaming for output table.
- csvSettings BatchPrediction Job Definition Csv Settings 
- CSV intake and output settings.
- enabled boolean
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities boolean
- Flag that enables returning of all probability columns. Defaults to True.
- includeProbabilitiesClasses string[]
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- maxExplanations number
- Compute prediction explanations for this amount of features.
- name string
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- numConcurrent number
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPrediction Job Definition Output Settings 
- A dict configuring how scored data is to be saved.
- passthroughColumns string[]
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPrediction Job Definition Prediction Instance 
- Defaults to instance specified by deployment or system configuration.
- predictionThreshold number
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPrediction Job Definition Schedule
- Defines at what intervals the job should run.
- skipDriftTracking boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- thresholdHigh number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPrediction Job Definition Timeseries Settings 
- Configuration for time-series scoring.
- deployment_id str
- The ID of the deployment to use for the batch prediction job.
- intake_settings BatchPrediction Job Definition Intake Settings Args 
- A dict configuring how data is coming from.
- abort_on_error bool
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunk_size Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- column_names_remapping Mapping[str, str]
- Mapping with column renaming for output table.
- csv_settings BatchPrediction Job Definition Csv Settings Args 
- CSV intake and output settings.
- enabled bool
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanation_algorithm str
- Which algorithm will be used to calculate prediction explanations.
- include_prediction_status bool
- Include the prediction_status column in the output. Defaults to False.
- include_probabilities bool
- Flag that enables returning of all probability columns. Defaults to True.
- include_probabilities_classes Sequence[str]
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- max_explanations int
- Compute prediction explanations for this amount of features.
- name str
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- num_concurrent int
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- output_settings BatchPrediction Job Definition Output Settings Args 
- A dict configuring how scored data is to be saved.
- passthrough_columns Sequence[str]
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthrough_columns_set str
- To pass through every column from the scoring dataset, set this to all.
- prediction_instance BatchPrediction Job Definition Prediction Instance Args 
- Defaults to instance specified by deployment or system configuration.
- prediction_threshold float
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- prediction_warning_enabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPrediction Job Definition Schedule Args
- Defines at what intervals the job should run.
- skip_drift_tracking bool
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- threshold_high float
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- threshold_low float
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseries_settings BatchPrediction Job Definition Timeseries Settings Args 
- Configuration for time-series scoring.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- intakeSettings Property Map
- A dict configuring how data is coming from.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunkSize Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String>
- Mapping with column renaming for output table.
- csvSettings Property Map
- CSV intake and output settings.
- enabled Boolean
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning of all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- maxExplanations Number
- Compute prediction explanations for this amount of features.
- name String
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- numConcurrent Number
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings Property Map
- A dict configuring how scored data is to be saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance Property Map
- Defaults to instance specified by deployment or system configuration.
- predictionThreshold Number
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule Property Map
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- thresholdHigh Number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings Property Map
- Configuration for time-series scoring.
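As a brief illustration of how the prediction-explanation inputs above combine, a minimal Python sketch (values, the resource name, and the intake URL are placeholders; thresholdHigh and thresholdLow restrict which predictions get explanations):
import pulumi_datarobot as datarobot

# Sketch: compute explanations only for confident predictions.
explained_job = datarobot.BatchPredictionJobDefinition(
    "explained-job",
    deployment_id="<deployment id>",
    intake_settings={"type": "s3", "url": "s3://my-bucket/scoring.csv"},
    max_explanations=5,    # explanations for up to 5 features per row
    threshold_high=0.8,    # only explain predictions above 0.8 ...
    threshold_low=0.2,     # ... or below 0.2
)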
Outputs
All input properties are implicitly available as output properties. Additionally, the BatchPredictionJobDefinition resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing BatchPredictionJobDefinition Resource
Get an existing BatchPredictionJobDefinition resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: BatchPredictionJobDefinitionState, opts?: CustomResourceOptions): BatchPredictionJobDefinition
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        abort_on_error: Optional[bool] = None,
        chunk_size: Optional[Any] = None,
        column_names_remapping: Optional[Mapping[str, str]] = None,
        csv_settings: Optional[BatchPredictionJobDefinitionCsvSettingsArgs] = None,
        deployment_id: Optional[str] = None,
        enabled: Optional[bool] = None,
        explanation_algorithm: Optional[str] = None,
        include_prediction_status: Optional[bool] = None,
        include_probabilities: Optional[bool] = None,
        include_probabilities_classes: Optional[Sequence[str]] = None,
        intake_settings: Optional[BatchPredictionJobDefinitionIntakeSettingsArgs] = None,
        max_explanations: Optional[int] = None,
        name: Optional[str] = None,
        num_concurrent: Optional[int] = None,
        output_settings: Optional[BatchPredictionJobDefinitionOutputSettingsArgs] = None,
        passthrough_columns: Optional[Sequence[str]] = None,
        passthrough_columns_set: Optional[str] = None,
        prediction_instance: Optional[BatchPredictionJobDefinitionPredictionInstanceArgs] = None,
        prediction_threshold: Optional[float] = None,
        prediction_warning_enabled: Optional[bool] = None,
        schedule: Optional[BatchPredictionJobDefinitionScheduleArgs] = None,
        skip_drift_tracking: Optional[bool] = None,
        threshold_high: Optional[float] = None,
        threshold_low: Optional[float] = None,
        timeseries_settings: Optional[BatchPredictionJobDefinitionTimeseriesSettingsArgs] = None) -> BatchPredictionJobDefinition
func GetBatchPredictionJobDefinition(ctx *Context, name string, id IDInput, state *BatchPredictionJobDefinitionState, opts ...ResourceOption) (*BatchPredictionJobDefinition, error)
public static BatchPredictionJobDefinition Get(string name, Input<string> id, BatchPredictionJobDefinitionState? state, CustomResourceOptions? opts = null)
public static BatchPredictionJobDefinition get(String name, Output<String> id, BatchPredictionJobDefinitionState state, CustomResourceOptions options)
resources:
  _:
    type: datarobot:BatchPredictionJobDefinition
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
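For instance, a minimal Python sketch of looking up an existing job definition by its provider-assigned ID (the ID is a placeholder), based on the get signature above:
import pulumi
import pulumi_datarobot as datarobot

# Look up an existing definition; the returned resource exposes the
# state properties listed below as outputs.
existing = datarobot.BatchPredictionJobDefinition.get(
    "existing-job-definition",
    "<job definition id>",
)
pulumi.export("existingName", existing.name)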
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- ChunkSize object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping Dictionary<string, string>
- Mapping with column renaming for output table.
- CsvSettings DataRobot Batch Prediction Job Definition Csv Settings 
- CSV intake and output settings.
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- Enabled bool
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning of all probability columns. Defaults to True.
- IncludeProbabilitiesClasses List<string>
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- IntakeSettings DataRobot Batch Prediction Job Definition Intake Settings 
- A dict configuring how data is coming from.
- MaxExplanations int
- Compute prediction explanations for this amount of features.
- Name string
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- NumConcurrent int
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings DataRobot Batch Prediction Job Definition Output Settings 
- A dict configuring how scored data is to be saved.
- PassthroughColumns List<string>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance DataRobot Batch Prediction Job Definition Prediction Instance 
- Defaults to instance specified by deployment or system configuration.
- PredictionThreshold double
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule DataRobot Batch Prediction Job Definition Schedule
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- ThresholdHigh double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings DataRobot Batch Prediction Job Definition Timeseries Settings 
- Configuration for time-series scoring.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- ChunkSize interface{}
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping map[string]string
- Mapping with column renaming for output table.
- CsvSettings BatchPrediction Job Definition Csv Settings Args 
- CSV intake and output settings.
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- Enabled bool
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning of all probability columns. Defaults to True.
- IncludeProbabilitiesClasses []string
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- IntakeSettings BatchPrediction Job Definition Intake Settings Args 
- A dict configuring how data is coming from.
- MaxExplanations int
- Compute prediction explanations for this amount of features.
- Name string
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- NumConcurrent int
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings BatchPrediction Job Definition Output Settings Args 
- A dict configuring how scored data is to be saved.
- PassthroughColumns []string
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance BatchPrediction Job Definition Prediction Instance Args 
- Defaults to instance specified by deployment or system configuration.
- PredictionThreshold float64
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule BatchPrediction Job Definition Schedule Args
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- ThresholdHigh float64
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow float64
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings BatchPrediction Job Definition Timeseries Settings Args 
- Configuration for time-series scoring.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunkSize Object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String,String>
- Mapping with column renaming for output table.
- csvSettings BatchPrediction Job Definition Csv Settings 
- CSV intake and output settings.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- enabled Boolean
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning of all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- intakeSettings BatchPrediction Job Definition Intake Settings 
- A dict configuring how data is coming from.
- maxExplanations Integer
- Compute prediction explanations for this amount of features.
- name String
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- numConcurrent Integer
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPrediction Job Definition Output Settings 
- A dict configuring how scored data is to be saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPrediction Job Definition Prediction Instance 
- Defaults to instance specified by deployment or system configuration.
- predictionThreshold Double
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPrediction Job Definition Schedule
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- thresholdHigh Double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPrediction Job Definition Timeseries Settings 
- Configuration for time-series scoring.
- abortOnError boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunkSize any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping {[key: string]: string}
- Mapping with column renaming for output table.
- csvSettings BatchPrediction Job Definition Csv Settings 
- CSV intake and output settings.
- deploymentId string
- The ID of the deployment to use for the batch prediction job.
- enabled boolean
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities boolean
- Flag that enables returning of all probability columns. Defaults to True.
- includeProbabilitiesClasses string[]
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- intakeSettings BatchPrediction Job Definition Intake Settings 
- A dict configuring where the scoring data comes from.
- maxExplanations number
- Compute prediction explanations for this number of features.
- name string
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- numConcurrent number
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPrediction Job Definition Output Settings 
- A dict configuring how scored data is to be saved.
- passthroughColumns string[]
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPrediction Job Definition Prediction Instance 
- Defaults to instance specified by deployment or system configuration.
- predictionThreshold number
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPrediction Job Definition Schedule
- Defines at what intervals the job should run.
- skipDriftTracking boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- thresholdHigh number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPrediction Job Definition Timeseries Settings 
- Configuration for time-series scoring.
- abort_on_error bool
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunk_size Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- column_names_remapping Mapping[str, str]
- Mapping with column renaming for output table.
- csv_settings BatchPrediction Job Definition Csv Settings Args 
- CSV intake and output settings.
- deployment_id str
- The ID of the deployment to use for the batch prediction job.
- enabled bool
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanation_algorithm str
- Which algorithm will be used to calculate prediction explanations.
- include_prediction_status bool
- Include the prediction_status column in the output. Defaults to False.
- include_probabilities bool
- Flag that enables returning of all probability columns. Defaults to True.
- include_probabilities_classes Sequence[str]
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- intake_settings BatchPrediction Job Definition Intake Settings Args 
- A dict configuring where the scoring data comes from.
- max_explanations int
- Compute prediction explanations for this number of features.
- name str
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- num_concurrent int
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- output_settings BatchPrediction Job Definition Output Settings Args 
- A dict configuring how scored data is to be saved.
- passthrough_columns Sequence[str]
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthrough_columns_set str
- To pass through every column from the scoring dataset, set this to all.
- prediction_instance BatchPrediction Job Definition Prediction Instance Args 
- Defaults to instance specified by deployment or system configuration.
- prediction_threshold float
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- prediction_warning_enabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPrediction Job Definition Schedule Args
- Defines at what intervals the job should run.
- skip_drift_tracking bool
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- threshold_high float
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- threshold_low float
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseries_settings BatchPrediction Job Definition Timeseries Settings Args 
- Configuration for time-series scoring.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row no matter how many errors are encountered. Defaults to True.
- chunkSize Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String>
- Mapping with column renaming for output table.
- csvSettings Property Map
- CSV intake and output settings.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- enabled Boolean
- Whether or not the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning of all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List the subset of classes if a user doesn’t want all the classes. Defaults to [].
- intakeSettings Property Map
- A dict configuring where the scoring data comes from.
- maxExplanations Number
- Compute prediction explanations for this number of features.
- name String
- The name you want your job to be identified with. Must be unique across the organization’s existing jobs.
- numConcurrent Number
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings Property Map
- A dict configuring how scored data is to be saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance Property Map
- Defaults to instance specified by deployment or system configuration.
- predictionThreshold Number
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule Property Map
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads to not affect drift tracking and cause unnecessary alerts. Defaults to false.
- thresholdHigh Number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings Property Map
- Configuration for time-series scoring.
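As a reading aid for the scalar tuning options above, here is a minimal sketch in Pulumi YAML. It is not an authoritative example: the deployment and credential references, the bucket and key, the column names, and the chosen chunk-size strategy are placeholder assumptions.
resources:
  tuningExample:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.example.id}          # placeholder deployment reference
      intakeSettings:
        type: s3
        url: s3://example-bucket/scoring.csv                    # placeholder bucket and key
        credentialId: ${datarobot_basic_credential.example.id}  # placeholder credential reference
      # Optional tuning fields documented above
      numConcurrent: 2            # score two chunks in parallel
      chunkSize: auto             # assumed named strategy; a fixed size in bytes also works
      maxExplanations: 5
      thresholdHigh: 0.8          # only explain predictions above 0.8 ...
      thresholdLow: 0.2           # ... or below 0.2
      passthroughColumnsSet: all  # copy every intake column into the output
      columnNamesRemapping:
        source_col: renamed_col   # placeholder column names
      includePredictionStatus: true
      abortOnError: false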
Supporting Types
BatchPredictionJobDefinitionCsvSettings, BatchPredictionJobDefinitionCsvSettingsArgs            
BatchPredictionJobDefinitionIntakeSettings, BatchPredictionJobDefinitionIntakeSettingsArgs            
- Type string
- Type of data source.
- Catalog string
- The name of specified database catalog for JDBC type.
- CredentialId string
- The ID of the credentials for S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- DatasetId string
- The ID of the dataset to score for dataset type.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- FetchSize int
- Changing the fetchSize can be used to balance throughput and memory usage for JDBC type.
- File string
- String path to file of scoring data for localFile type.
- Query string
- A self-supplied SELECT statement of the data set you wish to predict for JDBC type.
- Schema string
- The name of specified database schema for JDBC type.
- Table string
- The name of specified database table for JDBC type.
- Url string
- The URL to score (e.g.: s3://bucket/key) for S3 type.
- Type string
- Type of data source.
- Catalog string
- The name of specified database catalog for JDBC type.
- CredentialId string
- The ID of the credentials for S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- DatasetId string
- The ID of the dataset to score for dataset type.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- FetchSize int
- Changing the fetchSize can be used to balance throughput and memory usage for JDBC type.
- File string
- String path to file of scoring data for localFile type.
- Query string
- A self-supplied SELECT statement of the data set you wish to predict for JDBC type.
- Schema string
- The name of specified database schema for JDBC type.
- Table string
- The name of specified database table for JDBC type.
- Url string
- The URL to score (e.g.: s3://bucket/key) for S3 type.
- type String
- Type of data source.
- catalog String
- The name of specified database catalog for JDBC type.
- credentialId String
- The ID of the credentials for S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- datasetId String
- The ID of the dataset to score for dataset type.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- fetchSize Integer
- Changing the fetchSize can be used to balance throughput and memory usage for JDBC type.
- file String
- String path to file of scoring data for localFile type.
- query String
- A self-supplied SELECT statement of the data set you wish to predict for JDBC type.
- schema String
- The name of specified database schema for JDBC type.
- table String
- The name of specified database table for JDBC type.
- url String
- The URL to score (e.g.: s3://bucket/key) for S3 type.
- type string
- Type of data source.
- catalog string
- The name of specified database catalog for JDBC type.
- credentialId string
- The ID of the credentials for S3 or JDBC data source.
- dataStoreId string
- The ID of the external data store connected to the JDBC data source.
- datasetId string
- The ID of the dataset to score for dataset type.
- endpointUrl string
- Any non-default endpoint URL for S3 access.
- fetchSize number
- Changing the fetchSize can be used to balance throughput and memory usage for JDBC type.
- file string
- String path to file of scoring data for localFile type.
- query string
- A self-supplied SELECT statement of the data set you wish to predict for JDBC type.
- schema string
- The name of specified database schema for JDBC type.
- table string
- The name of specified database table for JDBC type.
- url string
- The URL to score (e.g.: s3://bucket/key) for S3 type.
- type str
- Type of data source.
- catalog str
- The name of specified database catalog for JDBC type.
- credential_id str
- The ID of the credentials for S3 or JDBC data source.
- data_store_id str
- The ID of the external data store connected to the JDBC data source.
- dataset_id str
- The ID of the dataset to score for dataset type.
- endpoint_url str
- Any non-default endpoint URL for S3 access.
- fetch_size int
- Changing the fetchSize can be used to balance throughput and memory usage for JDBC type.
- file str
- String path to file of scoring data for localFile type.
- query str
- A self-supplied SELECT statement of the data set you wish to predict for JDBC type.
- schema str
- The name of specified database schema for JDBC type.
- table str
- The name of specified database table for JDBC type.
- url str
- The URL to score (e.g.: s3://bucket/key) for S3 type.
- type String
- Type of data source.
- catalog String
- The name of specified database catalog for JDBC type.
- credentialId String
- The ID of the credentials for S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- datasetId String
- The ID of the dataset to score for dataset type.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- fetchSize Number
- Changing the fetchSize can be used to balance throughput and memory usage for JDBC type.
- file String
- String path to file of scoring data for localFile type.
- query String
- A self-supplied SELECT statement of the data set you wish to predict for JDBC type.
- schema String
- The name of specified database schema for JDBC type.
- table String
- The name of specified database table for JDBC type.
- url String
- The URL to score (e.g.: s3://bucket/key) for S3 type.
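To illustrate how the JDBC-oriented intake fields above combine, a hedged Pulumi YAML sketch follows. The data store and credential references and the catalog, schema, and table names are placeholders, not values documented on this page.
resources:
  jdbcIntakeExample:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.example.id}         # placeholder deployment reference
      intakeSettings:
        type: jdbc
        dataStoreId: ${datarobot_datastore.example.id}         # placeholder external data store reference
        credentialId: ${datarobot_basic_credential.example.id} # placeholder credential reference
        catalog: ANALYTICS                                     # placeholder catalog name
        schema: SCORING                                        # placeholder schema name
        table: PATIENTS                                        # placeholder table; a SELECT via `query` is the alternative
        fetchSize: 10000                                       # larger fetches trade memory for throughput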
BatchPredictionJobDefinitionOutputSettings, BatchPredictionJobDefinitionOutputSettingsArgs            
- Catalog string
- The name of specified database catalog for JDBC type.
- CreateTableIfNotExists bool
- If no existing table is detected, attempt to create it before writing data for JDBC type.
- CredentialId string
- The ID of the credentials for S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- Path string
- Path to save the scored data as CSV for localFile type.
- Schema string
- The name of specified database schema for JDBC type.
- StatementType string
- The type of insertion statement to create for JDBC type.
- Table string
- The name of specified database table for JDBC type.
- Type string
- Type of output.
- UpdateColumns List<string>
- A list of strings containing those column names to be updated for JDBC type.
- Url string
- The URL for storing the results (e.g.: s3://bucket/key) for S3 type.
- WhereColumns List<string>
- A list of strings containing those column names to be selected for JDBC type.
- Catalog string
- The name of specified database catalog for JDBC type.
- CreateTableIfNotExists bool
- If no existing table is detected, attempt to create it before writing data for JDBC type.
- CredentialId string
- The ID of the credentials for S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- Path string
- Path to save the scored data as CSV for localFile type.
- Schema string
- The name of specified database schema for JDBC type.
- StatementType string
- The type of insertion statement to create for JDBC type.
- Table string
- The name of specified database table for JDBC type.
- Type string
- Type of output.
- UpdateColumns []string
- A list of strings containing those column names to be updated for JDBC type.
- Url string
- The URL for storing the results (e.g.: s3://bucket/key) for S3 type.
- WhereColumns []string
- A list of strings containing those column names to be selected for JDBC type.
- catalog String
- The name of specified database catalog for JDBC type.
- createTableIfNotExists Boolean
- If no existing table is detected, attempt to create it before writing data for JDBC type.
- credentialId String
- The ID of the credentials for S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- path String
- Path to save the scored data as CSV for localFile type.
- schema String
- The name of specified database schema for JDBC type.
- statementType String
- The type of insertion statement to create for JDBC type.
- table String
- The name of specified database table for JDBC type.
- type String
- Type of output.
- updateColumns List<String>
- A list of strings containing those column names to be updated for JDBC type.
- url String
- The URL for storing the results (e.g.: s3://bucket/key) for S3 type.
- whereColumns List<String>
- A list of strings containing those column names to be selected for JDBC type.
- catalog string
- The name of specified database catalog for JDBC type.
- createTableIfNotExists boolean
- If no existing table is detected, attempt to create it before writing data for JDBC type.
- credentialId string
- The ID of the credentials for S3 or JDBC data source.
- dataStoreId string
- The ID of the external data store connected to the JDBC data source.
- endpointUrl string
- Any non-default endpoint URL for S3 access.
- path string
- Path to save the scored data as CSV for localFile type.
- schema string
- The name of specified database schema for JDBC type.
- statementType string
- The type of insertion statement to create for JDBC type.
- table string
- The name of specified database table for JDBC type.
- type string
- Type of output.
- updateColumns string[]
- A list of strings containing those column names to be updated for JDBC type.
- url string
- The URL for storing the results (e.g.: s3://bucket/key) for S3 type.
- whereColumns string[]
- A list of strings containing those column names to be selected for JDBC type.
- catalog str
- The name of specified database catalog for JDBC type.
- create_table_if_not_exists bool
- If no existing table is detected, attempt to create it before writing data for JDBC type.
- credential_id str
- The ID of the credentials for S3 or JDBC data source.
- data_store_id str
- The ID of the external data store connected to the JDBC data source.
- endpoint_url str
- Any non-default endpoint URL for S3 access.
- path str
- Path to save the scored data as CSV for localFile type.
- schema str
- The name of specified database schema for JDBC type.
- statement_type str
- The type of insertion statement to create for JDBC type.
- table str
- The name of specified database table for JDBC type.
- type str
- Type of output.
- update_columns Sequence[str]
- A list of strings containing those column names to be updated for JDBC type.
- url str
- The URL for storing the results (e.g.: s3://bucket/key) for S3 type.
- where_columns Sequence[str]
- A list of strings containing those column names to be selected for JDBC type.
- catalog String
- The name of specified database catalog for JDBC type.
- createTableIfNotExists Boolean
- If no existing table is detected, attempt to create it before writing data for JDBC type.
- credentialId String
- The ID of the credentials for S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- path String
- Path to save the scored data as CSV for localFile type.
- schema String
- The name of specified database schema for JDBC type.
- statementType String
- The type of insertion statement to create for JDBC type.
- table String
- The name of specified database table for JDBC type.
- type String
- Type of output.
- updateColumns List<String>
- A list of strings containing those column names to be updated for JDBC type.
- url String
- The URL for storing the results (e.g.: s3://bucket/key) for S3 type.
- whereColumns List<String>
- A list of strings containing those column names to be selected for JDBC type.
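For orientation, a hedged Pulumi YAML sketch of a JDBC output block using the fields above. The schema and table names are placeholders, and the statementType value shown is an assumption about a supported insertion statement; update-style statements are the ones that use updateColumns and whereColumns.
resources:
  jdbcOutputExample:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.example.id}         # placeholder deployment reference
      intakeSettings:
        type: s3
        url: s3://example-bucket/scoring.csv                   # placeholder bucket and key
        credentialId: ${datarobot_basic_credential.example.id} # placeholder credential reference
      outputSettings:
        type: jdbc
        dataStoreId: ${datarobot_datastore.example.id}         # placeholder external data store reference
        credentialId: ${datarobot_basic_credential.example.id}
        schema: SCORING                                        # placeholder schema name
        table: PREDICTIONS                                     # placeholder table name
        createTableIfNotExists: true                           # create the table on first write
        statementType: insert                                  # assumed value; see provider docs for the full set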
BatchPredictionJobDefinitionPredictionInstance, BatchPredictionJobDefinitionPredictionInstanceArgs            
- HostName string
- Hostname of the prediction instance.
- ApiKey string
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- DatarobotKey string
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization level DataRobot-Key.
- SslEnabled bool
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- HostName string
- Hostname of the prediction instance.
- ApiKey string
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- DatarobotKey string
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization level DataRobot-Key.
- SslEnabled bool
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- hostName String
- Hostname of the prediction instance.
- apiKey String
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobotKey String
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization level DataRobot-Key.
- sslEnabled Boolean
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- hostName string
- Hostname of the prediction instance.
- apiKey string
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobotKey string
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization level DataRobot-Key.
- sslEnabled boolean
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- host_name str
- Hostname of the prediction instance.
- api_key str
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobot_key str
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization level DataRobot-Key.
- ssl_enabled bool
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- hostName String
- Hostname of the prediction instance.
- apiKey String
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobotKey String
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization level DataRobot-Key.
- sslEnabled Boolean
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
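A minimal sketch of routing the job to a specific prediction instance, assuming placeholder values throughout: the host name, DataRobot-Key, and dataset ID below are illustrative only.
resources:
  dedicatedInstanceExample:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.example.id}        # placeholder deployment reference
      intakeSettings:
        type: dataset
        datasetId: 6542e1a2b2c3d4e5f6a7b8c9                   # placeholder AI Catalog dataset ID
      predictionInstance:
        hostName: example.dedicated.orm.datarobot.com         # placeholder prediction instance host
        datarobotKey: 154a8abb-cbde-4e73-ab3b-a46c389c337b    # placeholder organization-level DataRobot-Key
        sslEnabled: true                                      # set false only for non-SSL instances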
BatchPredictionJobDefinitionSchedule, BatchPredictionJobDefinitionScheduleArgs          
- DayOfMonths List<string>
- Days of the month when the job will run.
- DayOfWeeks List<string>
- Days of the week when the job will run.
- Hours List<string>
- Hours of the day when the job will run.
- Minutes List<string>
- Minutes of the day when the job will run.
- Months List<string>
- Months of the year when the job will run.
- DayOfMonths []string
- Days of the month when the job will run.
- DayOfWeeks []string
- Days of the week when the job will run.
- Hours []string
- Hours of the day when the job will run.
- Minutes []string
- Minutes of the day when the job will run.
- Months []string
- Months of the year when the job will run.
- dayOfMonths List<String>
- Days of the month when the job will run.
- dayOfWeeks List<String>
- Days of the week when the job will run.
- hours List<String>
- Hours of the day when the job will run.
- minutes List<String>
- Minutes of the day when the job will run.
- months List<String>
- Months of the year when the job will run.
- dayOfMonths string[]
- Days of the month when the job will run.
- dayOfWeeks string[]
- Days of the week when the job will run.
- hours string[]
- Hours of the day when the job will run.
- minutes string[]
- Minutes of the day when the job will run.
- months string[]
- Months of the year when the job will run.
- day_of_months Sequence[str]
- Days of the month when the job will run.
- day_of_weeks Sequence[str]
- Days of the week when the job will run.
- hours Sequence[str]
- Hours of the day when the job will run.
- minutes Sequence[str]
- Minutes of the day when the job will run.
- months Sequence[str]
- Months of the year when the job will run.
- dayOfMonths List<String>
- Days of the month when the job will run.
- dayOfWeeks List<String>
- Days of the week when the job will run.
- hours List<String>
- Hours of the day when the job will run.
- minutes List<String>
- Minutes of the day when the job will run.
- months List<String>
- Months of the year when the job will run.
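The schedule fields above work like cron-style lists of string values. Below is a hedged Pulumi YAML sketch using the field names listed above; the chosen times are arbitrary placeholders, and the day-of-week numbering shown in the comment is an assumption.
resources:
  scheduledExample:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.example.id}         # placeholder deployment reference
      intakeSettings:
        type: s3
        url: s3://example-bucket/scoring.csv                   # placeholder bucket and key
        credentialId: ${datarobot_basic_credential.example.id} # placeholder credential reference
      enabled: true                  # enabling the definition requires a schedule (see `enabled` above)
      schedule:
        minutes: ["15", "45"]        # quarter past and quarter to each selected hour
        hours: ["9", "17"]           # run at 09:xx and 17:xx
        dayOfWeeks: ["1", "3", "5"]  # placeholder selection (numbering convention is an assumption)
        dayOfMonths: ["*"]           # every day of the month
        months: ["*"]                # every month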
BatchPredictionJobDefinitionTimeseriesSettings, BatchPredictionJobDefinitionTimeseriesSettingsArgs            
- ForecastPoint string
- Forecast point for the dataset, used for the forecast predictions. May be passed if timeseries_settings.type=forecast.
- PredictionsEndDate string
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- PredictionsStartDate string
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- RelaxKnownInAdvanceFeaturesCheck bool
- If True, missing values in the known in advance features are allowed in the forecast window at the prediction time. Default is False.
- Type string
- Type of time-series prediction. Must be 'forecast' or 'historical'. Default is 'forecast'.
- ForecastPoint string
- Forecast point for the dataset, used for the forecast predictions. May be passed if timeseries_settings.type=forecast.
- PredictionsEndDate string
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- PredictionsStartDate string
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- RelaxKnownInAdvanceFeaturesCheck bool
- If True, missing values in the known in advance features are allowed in the forecast window at the prediction time. Default is False.
- Type string
- Type of time-series prediction. Must be 'forecast' or 'historical'. Default is 'forecast'.
- forecastPoint String
- Forecast point for the dataset, used for the forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictionsEndDate String
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictionsStartDate String
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relaxKnownInAdvanceFeaturesCheck Boolean
- If True, missing values in the known in advance features are allowed in the forecast window at the prediction time. Default is False.
- type String
- Type of time-series prediction. Must be 'forecast' or 'historical'. Default is 'forecast'.
- forecastPoint string
- Forecast point for the dataset, used for the forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictionsEndDate string
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictionsStartDate string
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relaxKnownInAdvanceFeaturesCheck boolean
- If True, missing values in the known in advance features are allowed in the forecast window at the prediction time. Default is False.
- type string
- Type of time-series prediction. Must be 'forecast' or 'historical'. Default is 'forecast'.
- forecast_point str
- Forecast point for the dataset, used for the forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictions_end_date str
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictions_start_date str
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relax_known_in_advance_features_check bool
- If True, missing values in the known in advance features are allowed in the forecast window at the prediction time. Default is False.
- type str
- Type of time-series prediction. Must be 'forecast' or 'historical'. Default is 'forecast'.
- forecastPoint String
- Forecast point for the dataset, used for the forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictionsEndDate String
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictionsStartDate String
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relaxKnownInAdvanceFeaturesCheck Boolean
- If True, missing values in the known in advance features are allowed in the forecast window at the prediction time. Default is False.
- type String
- Type of time-series prediction. Must be 'forecast' or 'historical'. Default is 'forecast'.
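Finally, a hedged Pulumi YAML sketch of the two time-series modes described above. The dates, bucket, and references are placeholders and the deployment is assumed to be a time-series deployment.
resources:
  timeseriesExample:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${datarobot_deployment.example.id}         # placeholder time-series deployment reference
      intakeSettings:
        type: s3
        url: s3://example-bucket/history.csv                   # placeholder bucket and key
        credentialId: ${datarobot_basic_credential.example.id} # placeholder credential reference
      timeseriesSettings:
        type: forecast                                         # default mode
        forecastPoint: "2024-01-01T00:00:00Z"                  # placeholder forecast point
        relaxKnownInAdvanceFeaturesCheck: false
        # For historical (backtest-style) scoring instead, use:
        # type: historical
        # predictionsStartDate: "2023-01-01T00:00:00Z"
        # predictionsEndDate: "2023-12-31T00:00:00Z"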
Package Details
- Repository
- datarobot datarobot-community/pulumi-datarobot
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the datarobot Terraform Provider.
