diff --git a/eng/mgmt/mgmtmetadata/datafactory_resource-manager.txt b/eng/mgmt/mgmtmetadata/datafactory_resource-manager.txt new file mode 100644 index 000000000000..c59cf14e6880 --- /dev/null +++ b/eng/mgmt/mgmtmetadata/datafactory_resource-manager.txt @@ -0,0 +1,14 @@ +Installing AutoRest version: latest +AutoRest installed successfully. +Commencing code generation +Generating CSharp code +Executing AutoRest command +cmd.exe /c autorest.cmd https://github.com/Azure/azure-rest-api-specs/blob/master/specification/datafactory/resource-manager/readme.md --csharp --version=latest --reflect-api-versions --tag=package-2018-06 --csharp-sdks-folder=D:\Repos\azure-sdk-for-net\sdk +2019-07-26 10:54:28 UTC +Azure-rest-api-specs repository information +GitHub fork: Azure +Branch: master +Commit: 7936730ee4e746dd7af1a0a97b20ba4779a2a35c +AutoRest information +Requested version: latest +Bootstrapper version: autorest@2.0.4283 diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDataExplorerCommandActivity.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDataExplorerCommandActivity.cs new file mode 100644 index 000000000000..affb4aa218d9 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDataExplorerCommandActivity.cs @@ -0,0 +1,98 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Microsoft.Rest; + using Microsoft.Rest.Serialization; + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// Azure Data Explorer command activity. + /// + [Newtonsoft.Json.JsonObject("AzureDataExplorerCommand")] + [Rest.Serialization.JsonTransformation] + public partial class AzureDataExplorerCommandActivity : ExecutionActivity + { + /// + /// Initializes a new instance of the AzureDataExplorerCommandActivity + /// class. + /// + public AzureDataExplorerCommandActivity() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the AzureDataExplorerCommandActivity + /// class. + /// + /// Activity name. + /// A control command, according to the Azure + /// Data Explorer command syntax. Type: string (or Expression with + /// resultType string). + /// Unmatched properties from the + /// message are deserialized this collection + /// Activity description. + /// Activity depends on condition. + /// Activity user properties. + /// Linked service reference. + /// Activity policy. + /// Control command timeout. Type: string + /// (or Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))..) 
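For reviewers trying the regenerated surface locally, a minimal bootstrap sketch for the client that exposes the models below; the credential flow and every identifier here are placeholder assumptions, not part of this change:

// Hypothetical bootstrap; all identifiers are placeholders.
using System.Threading.Tasks;
using Microsoft.Azure.Management.DataFactory;
using Microsoft.Rest;
using Microsoft.Rest.Azure.Authentication;

public static async Task<DataFactoryManagementClient> CreateClientAsync()
{
    // Service-principal login via Microsoft.Rest.ClientRuntime.Azure.Authentication.
    ServiceClientCredentials credentials = await ApplicationTokenProvider.LoginSilentAsync(
        "<tenant-id>", "<client-id>", "<client-secret>");
    return new DataFactoryManagementClient(credentials) { SubscriptionId = "<subscription-id>" };
}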
+ public AzureDataExplorerCommandActivity(string name, object command, IDictionary additionalProperties = default(IDictionary), string description = default(string), IList dependsOn = default(IList), IList userProperties = default(IList), LinkedServiceReference linkedServiceName = default(LinkedServiceReference), ActivityPolicy policy = default(ActivityPolicy), object commandTimeout = default(object)) + : base(name, additionalProperties, description, dependsOn, userProperties, linkedServiceName, policy) + { + Command = command; + CommandTimeout = commandTimeout; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets a control command, according to the Azure Data + /// Explorer command syntax. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "typeProperties.command")] + public object Command { get; set; } + + /// + /// Gets or sets control command timeout. Type: string (or Expression + /// with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))..) + /// + [JsonProperty(PropertyName = "typeProperties.commandTimeout")] + public object CommandTimeout { get; set; } + + /// + /// Validate the object. + /// + /// + /// Thrown if validation fails + /// + public override void Validate() + { + base.Validate(); + if (Command == null) + { + throw new ValidationException(ValidationRules.CannotBeNull, "Command"); + } + } + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlSink.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlSink.cs new file mode 100644 index 000000000000..b7948c955b27 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlSink.cs @@ -0,0 +1,71 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// A copy activity Azure PostgreSQL sink. + /// + public partial class AzurePostgreSqlSink : CopySink + { + /// + /// Initializes a new instance of the AzurePostgreSqlSink class. + /// + public AzurePostgreSqlSink() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the AzurePostgreSqlSink class. + /// + /// Unmatched properties from the + /// message are deserialized this collection + /// Write batch size. Type: integer (or + /// Expression with resultType integer), minimum: 0. + /// Write batch timeout. Type: string + /// (or Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// Sink retry count. Type: integer (or + /// Expression with resultType integer). + /// Sink retry wait. Type: string (or + /// Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// The maximum concurrent + /// connection count for the sink data store. Type: integer (or + /// Expression with resultType integer). + /// A query to execute before starting the + /// copy. 
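A hedged sketch of how the new AzureDataExplorerCommandActivity might be authored into a pipeline, reusing a client like the one sketched above; the resource group, factory, pipeline, and linked-service names are illustrative:

// The activity wraps a single ADX control command; command and name are required.
var adxActivity = new AzureDataExplorerCommandActivity(
    name: "RunControlCommand",
    command: ".show tables",                 // any Azure Data Explorer control command
    linkedServiceName: new LinkedServiceReference { ReferenceName = "AzureDataExplorerLS" },
    commandTimeout: "00:20:00");             // follows the documented timespan pattern

client.Pipelines.CreateOrUpdate("myResourceGroup", "myFactory", "AdxPipeline",
    new PipelineResource { Activities = new List<Activity> { adxActivity } });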
Type: string (or Expression with resultType string). + public AzurePostgreSqlSink(IDictionary additionalProperties = default(IDictionary), object writeBatchSize = default(object), object writeBatchTimeout = default(object), object sinkRetryCount = default(object), object sinkRetryWait = default(object), object maxConcurrentConnections = default(object), object preCopyScript = default(object)) + : base(additionalProperties, writeBatchSize, writeBatchTimeout, sinkRetryCount, sinkRetryWait, maxConcurrentConnections) + { + PreCopyScript = preCopyScript; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets a query to execute before starting the copy. Type: + /// string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "preCopyScript")] + public object PreCopyScript { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlTableDataset.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlTableDataset.cs index a010188aff4c..cad0ef71175e 100644 --- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlTableDataset.cs +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzurePostgreSqlTableDataset.cs @@ -53,12 +53,21 @@ public AzurePostgreSqlTableDataset() /// describing the Dataset. /// The folder that this Dataset is in. If not /// specified, Dataset will appear at the root level. - /// The table name. Type: string (or Expression - /// with resultType string). - public AzurePostgreSqlTableDataset(LinkedServiceReference linkedServiceName, IDictionary additionalProperties = default(IDictionary), string description = default(string), object structure = default(object), object schema = default(object), IDictionary parameters = default(IDictionary), IList annotations = default(IList), DatasetFolder folder = default(DatasetFolder), object tableName = default(object)) + /// The table name of the Azure PostgreSQL + /// database which includes both schema and table. Type: string (or + /// Expression with resultType string). + /// The table name of the Azure PostgreSQL + /// database. Type: string (or Expression with resultType + /// string). + /// The schema name of + /// the Azure PostgreSQL database. Type: string (or Expression with + /// resultType string). + public AzurePostgreSqlTableDataset(LinkedServiceReference linkedServiceName, IDictionary additionalProperties = default(IDictionary), string description = default(string), object structure = default(object), object schema = default(object), IDictionary parameters = default(IDictionary), IList annotations = default(IList), DatasetFolder folder = default(DatasetFolder), object tableName = default(object), object table = default(object), object azurePostgreSqlTableDatasetSchema = default(object)) : base(linkedServiceName, additionalProperties, description, structure, schema, parameters, annotations, folder) { TableName = tableName; + Table = table; + AzurePostgreSqlTableDatasetSchema = azurePostgreSqlTableDatasetSchema; CustomInit(); } @@ -68,12 +77,27 @@ public AzurePostgreSqlTableDataset() partial void CustomInit(); /// - /// Gets or sets the table name. Type: string (or Expression with + /// Gets or sets the table name of the Azure PostgreSQL database which + /// includes both schema and table. 
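The new AzurePostgreSqlSink slots into a copy activity like any other CopySink; an illustrative sketch (the source type, script text, and batch size are assumptions):

var copyToPostgres = new CopyActivity(
    name: "CopyIntoAzurePostgreSql",
    source: new BlobSource(),
    sink: new AzurePostgreSqlSink
    {
        PreCopyScript = "TRUNCATE TABLE public.staging_orders",  // runs before the copy starts
        WriteBatchSize = 10000                                   // inherited from CopySink
    });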
Type: string (or Expression with /// resultType string). /// [JsonProperty(PropertyName = "typeProperties.tableName")] public object TableName { get; set; } + /// + /// Gets or sets the table name of the Azure PostgreSQL database. Type: + /// string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "typeProperties.table")] + public object Table { get; set; } + + /// + /// Gets or sets the schema name of the Azure PostgreSQL database. + /// Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "typeProperties.schema")] + public object AzurePostgreSqlTableDatasetSchema { get; set; } + /// /// Validate the object. /// diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/CosmosDbMongoDbApiSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/CosmosDbMongoDbApiSource.cs index f44f573edc92..629a30a6bfa5 100644 --- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/CosmosDbMongoDbApiSource.cs +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/CosmosDbMongoDbApiSource.cs @@ -50,7 +50,7 @@ public CosmosDbMongoDbApiSource() /// Specifies the number of documents to return /// in each batch of the response from MongoDB instance. In most cases, /// modifying the batch size will not affect the user or the - /// application. This property�s main purpose is to avoid hit the + /// application. This property's main purpose is to avoid hit the /// limitation of response size. Type: integer (or Expression with /// resultType integer). public CosmosDbMongoDbApiSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object filter = default(object), MongoDbCursorMethodsProperties cursorMethods = default(MongoDbCursorMethodsProperties), object batchSize = default(object)) @@ -86,7 +86,7 @@ public CosmosDbMongoDbApiSource() /// Gets or sets specifies the number of documents to return in each /// batch of the response from MongoDB instance. In most cases, /// modifying the batch size will not affect the user or the - /// application. This property�s main purpose is to avoid hit the + /// application. This property's main purpose is to avoid hit the /// limitation of response size. Type: integer (or Expression with /// resultType integer). /// diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/Db2Source.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/Db2Source.cs new file mode 100644 index 000000000000..69b7e158b3f6 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/Db2Source.cs @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// A copy activity source for Db2 databases. + /// + public partial class Db2Source : CopySource + { + /// + /// Initializes a new instance of the Db2Source class. 
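With the split properties added above, an AzurePostgreSqlTableDataset can now name schema and table separately; the generated property is AzurePostgreSqlTableDatasetSchema because the base Dataset already exposes Schema, but it still serializes as typeProperties.schema. Names here are illustrative:

var pgDataset = new AzurePostgreSqlTableDataset(
    new LinkedServiceReference { ReferenceName = "AzurePostgreSqlLS" },
    table: "orders",                               // typeProperties.table
    azurePostgreSqlTableDatasetSchema: "public");  // typeProperties.schema

// The legacy combined form remains accepted:
var legacyDataset = new AzurePostgreSqlTableDataset(
    new LinkedServiceReference { ReferenceName = "AzurePostgreSqlLS" },
    tableName: "public.orders");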
+ /// + public Db2Source() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the Db2Source class. + /// + /// Unmatched properties from the + /// message are deserialized this collection + /// Source retry count. Type: integer + /// (or Expression with resultType integer). + /// Source retry wait. Type: string (or + /// Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// The maximum concurrent + /// connection count for the source data store. Type: integer (or + /// Expression with resultType integer). + /// Database query. Type: string (or Expression + /// with resultType string). + public Db2Source(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object)) + : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) + { + Query = query; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets database query. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "query")] + public object Query { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MongoDbV2Source.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MongoDbV2Source.cs index 409681b340d5..b4e7e625d4bc 100644 --- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MongoDbV2Source.cs +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MongoDbV2Source.cs @@ -50,7 +50,7 @@ public MongoDbV2Source() /// Specifies the number of documents to return /// in each batch of the response from MongoDB instance. In most cases, /// modifying the batch size will not affect the user or the - /// application. This property�s main purpose is to avoid hit the + /// application. This property's main purpose is to avoid hit the /// limitation of response size. Type: integer (or Expression with /// resultType integer). public MongoDbV2Source(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object filter = default(object), MongoDbCursorMethodsProperties cursorMethods = default(MongoDbCursorMethodsProperties), object batchSize = default(object)) @@ -86,7 +86,7 @@ public MongoDbV2Source() /// Gets or sets specifies the number of documents to return in each /// batch of the response from MongoDB instance. In most cases, /// modifying the batch size will not affect the user or the - /// application. This property�s main purpose is to avoid hit the + /// application. This property's main purpose is to avoid hit the /// limitation of response size. Type: integer (or Expression with /// resultType integer). /// diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MySqlSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MySqlSource.cs new file mode 100644 index 000000000000..dd47f8f58f8c --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MySqlSource.cs @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. 
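Db2Source adds only a Query property on top of CopySource; a minimal sketch, with an assumed query and retry policy:

var db2Source = new Db2Source
{
    Query = "SELECT ID, NAME FROM DB2ADMIN.CUSTOMERS",
    SourceRetryCount = 3,          // inherited from CopySource
    SourceRetryWait = "00:00:30"
};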
+// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// A copy activity source for MySQL databases. + /// + public partial class MySqlSource : CopySource + { + /// + /// Initializes a new instance of the MySqlSource class. + /// + public MySqlSource() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the MySqlSource class. + /// + /// Unmatched properties from the + /// message are deserialized this collection + /// Source retry count. Type: integer + /// (or Expression with resultType integer). + /// Source retry wait. Type: string (or + /// Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// The maximum concurrent + /// connection count for the source data store. Type: integer (or + /// Expression with resultType integer). + /// Database query. Type: string (or Expression + /// with resultType string). + public MySqlSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object)) + : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) + { + Query = query; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets database query. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "query")] + public object Query { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MySqlTableDataset.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MySqlTableDataset.cs new file mode 100644 index 000000000000..7a49a8989ecb --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/MySqlTableDataset.cs @@ -0,0 +1,86 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Microsoft.Rest; + using Microsoft.Rest.Serialization; + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// The MySQL table dataset. + /// + [Newtonsoft.Json.JsonObject("MySqlTable")] + [Rest.Serialization.JsonTransformation] + public partial class MySqlTableDataset : Dataset + { + /// + /// Initializes a new instance of the MySqlTableDataset class. + /// + public MySqlTableDataset() + { + LinkedServiceName = new LinkedServiceReference(); + CustomInit(); + } + + /// + /// Initializes a new instance of the MySqlTableDataset class. + /// + /// Linked service reference. + /// Unmatched properties from the + /// message are deserialized this collection + /// Dataset description. 
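MySqlSource follows the same Query-only shape; a one-line sketch with an assumed query:

var mySqlSource = new MySqlSource { Query = "SELECT * FROM orders WHERE updated_at > '2019-07-01'" };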
+ /// Columns that define the structure of the + /// dataset. Type: array (or Expression with resultType array), + /// itemType: DatasetDataElement. + /// Columns that define the physical type schema + /// of the dataset. Type: array (or Expression with resultType array), + /// itemType: DatasetSchemaDataElement. + /// Parameters for dataset. + /// List of tags that can be used for + /// describing the Dataset. + /// The folder that this Dataset is in. If not + /// specified, Dataset will appear at the root level. + /// The MySQL table name. Type: string (or + /// Expression with resultType string). + public MySqlTableDataset(LinkedServiceReference linkedServiceName, IDictionary additionalProperties = default(IDictionary), string description = default(string), object structure = default(object), object schema = default(object), IDictionary parameters = default(IDictionary), IList annotations = default(IList), DatasetFolder folder = default(DatasetFolder), object tableName = default(object)) + : base(linkedServiceName, additionalProperties, description, structure, schema, parameters, annotations, folder) + { + TableName = tableName; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets the MySQL table name. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "typeProperties.tableName")] + public object TableName { get; set; } + + /// + /// Validate the object. + /// + /// + /// Thrown if validation fails + /// + public override void Validate() + { + base.Validate(); + } + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaPartitionOption.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaPartitionOption.cs new file mode 100644 index 000000000000..5e73e2448eb2 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaPartitionOption.cs @@ -0,0 +1,23 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + + /// + /// Defines values for NetezzaPartitionOption. + /// + public static class NetezzaPartitionOption + { + public const string None = "None"; + public const string DataSlice = "DataSlice"; + public const string DynamicRange = "DynamicRange"; + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaPartitionSettings.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaPartitionSettings.cs new file mode 100644 index 000000000000..7dd219fd5c4f --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaPartitionSettings.cs @@ -0,0 +1,81 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
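Registering the new MySqlTableDataset works through the existing dataset operations; an illustrative sketch (resource and table names assumed):

client.Datasets.CreateOrUpdate("myResourceGroup", "myFactory", "MySqlOrders",
    new DatasetResource(new MySqlTableDataset(
        new LinkedServiceReference { ReferenceName = "MySqlLS" },
        tableName: "orders")));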
+// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Linq; + + /// + /// The settings that will be leveraged for Netezza source partitioning. + /// + public partial class NetezzaPartitionSettings + { + /// + /// Initializes a new instance of the NetezzaPartitionSettings class. + /// + public NetezzaPartitionSettings() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the NetezzaPartitionSettings class. + /// + /// The name of the column in integer + /// type that will be used for proceeding range partitioning. Type: + /// string (or Expression with resultType string). + /// The maximum value of column + /// specified in partitionColumnName that will be used for proceeding + /// range partitioning. Type: string (or Expression with resultType + /// string). + /// The minimum value of column + /// specified in partitionColumnName that will be used for proceeding + /// range partitioning. Type: string (or Expression with resultType + /// string). + public NetezzaPartitionSettings(object partitionColumnName = default(object), object partitionUpperBound = default(object), object partitionLowerBound = default(object)) + { + PartitionColumnName = partitionColumnName; + PartitionUpperBound = partitionUpperBound; + PartitionLowerBound = partitionLowerBound; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets the name of the column in integer type that will be + /// used for proceeding range partitioning. Type: string (or Expression + /// with resultType string). + /// + [JsonProperty(PropertyName = "partitionColumnName")] + public object PartitionColumnName { get; set; } + + /// + /// Gets or sets the maximum value of column specified in + /// partitionColumnName that will be used for proceeding range + /// partitioning. Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "partitionUpperBound")] + public object PartitionUpperBound { get; set; } + + /// + /// Gets or sets the minimum value of column specified in + /// partitionColumnName that will be used for proceeding range + /// partitioning. Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "partitionLowerBound")] + public object PartitionLowerBound { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaSource.cs index 41fb8666ba84..dca14ad1dcf3 100644 --- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaSource.cs +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/NetezzaSource.cs @@ -43,10 +43,17 @@ public NetezzaSource() /// Expression with resultType integer). /// A query to retrieve data from source. Type: /// string (or Expression with resultType string). - public NetezzaSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object)) + /// The partition mechanism that will be + /// used for Netezza read in parallel. Possible values include: 'None', + /// 'DataSlice', 'DynamicRange' + /// The settings that will be leveraged + /// for Netezza source partitioning. 
+ public NetezzaSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object), string partitionOption = default(string), NetezzaPartitionSettings partitionSettings = default(NetezzaPartitionSettings)) : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) { Query = query; + PartitionOption = partitionOption; + PartitionSettings = partitionSettings; CustomInit(); } @@ -62,5 +69,20 @@ public NetezzaSource() [JsonProperty(PropertyName = "query")] public object Query { get; set; } + /// + /// Gets or sets the partition mechanism that will be used for Netezza + /// read in parallel. Possible values include: 'None', 'DataSlice', + /// 'DynamicRange' + /// + [JsonProperty(PropertyName = "partitionOption")] + public string PartitionOption { get; set; } + + /// + /// Gets or sets the settings that will be leveraged for Netezza source + /// partitioning. + /// + [JsonProperty(PropertyName = "partitionSettings")] + public NetezzaPartitionSettings PartitionSettings { get; set; } + } } diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/ODataSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/ODataSource.cs new file mode 100644 index 000000000000..809d174d7beb --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/ODataSource.cs @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// A copy activity source for OData source. + /// + public partial class ODataSource : CopySource + { + /// + /// Initializes a new instance of the ODataSource class. + /// + public ODataSource() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the ODataSource class. + /// + /// Unmatched properties from the + /// message are deserialized this collection + /// Source retry count. Type: integer + /// (or Expression with resultType integer). + /// Source retry wait. Type: string (or + /// Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// The maximum concurrent + /// connection count for the source data store. Type: integer (or + /// Expression with resultType integer). + /// OData query. For example, "$top=1". Type: + /// string (or Expression with resultType string). + public ODataSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object)) + : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) + { + Query = query; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets oData query. For example, "$top=1". 
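How the new Netezza partitioning knobs compose on the source, as a hedged sketch; the column name and bounds are assumptions:

var netezzaSource = new NetezzaSource
{
    Query = "SELECT * FROM SALES.TRANSACTIONS",
    PartitionOption = NetezzaPartitionOption.DynamicRange,
    PartitionSettings = new NetezzaPartitionSettings(
        partitionColumnName: "TRANSACTION_ID",   // integer column used for range splits
        partitionLowerBound: "1",
        partitionUpperBound: "1000000")
};
// NetezzaPartitionOption.DataSlice instead leans on Netezza's built-in data
// slices and typically needs no PartitionSettings.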
Type: string (or + /// Expression with resultType string). + /// + [JsonProperty(PropertyName = "query")] + public object Query { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OdbcSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OdbcSource.cs new file mode 100644 index 000000000000..7e229f98275a --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OdbcSource.cs @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// A copy activity source for ODBC databases. + /// + public partial class OdbcSource : CopySource + { + /// + /// Initializes a new instance of the OdbcSource class. + /// + public OdbcSource() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the OdbcSource class. + /// + /// Unmatched properties from the + /// message are deserialized this collection + /// Source retry count. Type: integer + /// (or Expression with resultType integer). + /// Source retry wait. Type: string (or + /// Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// The maximum concurrent + /// connection count for the source data store. Type: integer (or + /// Expression with resultType integer). + /// Database query. Type: string (or Expression + /// with resultType string). + public OdbcSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object)) + : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) + { + Query = query; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets database query. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "query")] + public object Query { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OdbcTableDataset.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OdbcTableDataset.cs new file mode 100644 index 000000000000..c86afef7707b --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OdbcTableDataset.cs @@ -0,0 +1,86 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
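ODataSource and OdbcSource expose the same single Query knob; e.g. (query values are assumptions):

var odataSource = new ODataSource { Query = "$top=10&$select=Name" };  // OData query options
var odbcSource  = new OdbcSource  { Query = "SELECT * FROM dbo.Products" };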
+// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Microsoft.Rest; + using Microsoft.Rest.Serialization; + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// The ODBC table dataset. + /// + [Newtonsoft.Json.JsonObject("OdbcTable")] + [Rest.Serialization.JsonTransformation] + public partial class OdbcTableDataset : Dataset + { + /// + /// Initializes a new instance of the OdbcTableDataset class. + /// + public OdbcTableDataset() + { + LinkedServiceName = new LinkedServiceReference(); + CustomInit(); + } + + /// + /// Initializes a new instance of the OdbcTableDataset class. + /// + /// Linked service reference. + /// Unmatched properties from the + /// message are deserialized this collection + /// Dataset description. + /// Columns that define the structure of the + /// dataset. Type: array (or Expression with resultType array), + /// itemType: DatasetDataElement. + /// Columns that define the physical type schema + /// of the dataset. Type: array (or Expression with resultType array), + /// itemType: DatasetSchemaDataElement. + /// Parameters for dataset. + /// List of tags that can be used for + /// describing the Dataset. + /// The folder that this Dataset is in. If not + /// specified, Dataset will appear at the root level. + /// The ODBC table name. Type: string (or + /// Expression with resultType string). + public OdbcTableDataset(LinkedServiceReference linkedServiceName, IDictionary additionalProperties = default(IDictionary), string description = default(string), object structure = default(object), object schema = default(object), IDictionary parameters = default(IDictionary), IList annotations = default(IList), DatasetFolder folder = default(DatasetFolder), object tableName = default(object)) + : base(linkedServiceName, additionalProperties, description, structure, schema, parameters, annotations, folder) + { + TableName = tableName; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets the ODBC table name. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "typeProperties.tableName")] + public object TableName { get; set; } + + /// + /// Validate the object. + /// + /// + /// Thrown if validation fails + /// + public override void Validate() + { + base.Validate(); + } + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OraclePartitionOption.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OraclePartitionOption.cs new file mode 100644 index 000000000000..82a50533c64f --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OraclePartitionOption.cs @@ -0,0 +1,23 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + + /// + /// Defines values for OraclePartitionOption. 
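Validation for the new OdbcTableDataset flows through the base Dataset, so a missing linked service reference surfaces as a ValidationException; illustrative:

var odbcDataset = new OdbcTableDataset(
    new LinkedServiceReference { ReferenceName = "OdbcLS" },
    tableName: "PRODUCTS");
odbcDataset.Validate();  // throws Microsoft.Rest.ValidationException if required fields are null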
+ /// + public static class OraclePartitionOption + { + public const string None = "None"; + public const string PhysicalPartitionsOfTable = "PhysicalPartitionsOfTable"; + public const string DynamicRange = "DynamicRange"; + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OraclePartitionSettings.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OraclePartitionSettings.cs new file mode 100644 index 000000000000..67712035661d --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OraclePartitionSettings.cs @@ -0,0 +1,90 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Linq; + + /// + /// The settings that will be leveraged for Oracle source partitioning. + /// + public partial class OraclePartitionSettings + { + /// + /// Initializes a new instance of the OraclePartitionSettings class. + /// + public OraclePartitionSettings() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the OraclePartitionSettings class. + /// + /// Names of the physical partitions of + /// Oracle table. + /// The name of the column in integer + /// type that will be used for proceeding range partitioning. Type: + /// string (or Expression with resultType string). + /// The maximum value of column + /// specified in partitionColumnName that will be used for proceeding + /// range partitioning. Type: string (or Expression with resultType + /// string). + /// The minimum value of column + /// specified in partitionColumnName that will be used for proceeding + /// range partitioning. Type: string (or Expression with resultType + /// string). + public OraclePartitionSettings(object partitionNames = default(object), object partitionColumnName = default(object), object partitionUpperBound = default(object), object partitionLowerBound = default(object)) + { + PartitionNames = partitionNames; + PartitionColumnName = partitionColumnName; + PartitionUpperBound = partitionUpperBound; + PartitionLowerBound = partitionLowerBound; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets names of the physical partitions of Oracle table. + /// + [JsonProperty(PropertyName = "partitionNames")] + public object PartitionNames { get; set; } + + /// + /// Gets or sets the name of the column in integer type that will be + /// used for proceeding range partitioning. Type: string (or Expression + /// with resultType string). + /// + [JsonProperty(PropertyName = "partitionColumnName")] + public object PartitionColumnName { get; set; } + + /// + /// Gets or sets the maximum value of column specified in + /// partitionColumnName that will be used for proceeding range + /// partitioning. Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "partitionUpperBound")] + public object PartitionUpperBound { get; set; } + + /// + /// Gets or sets the minimum value of column specified in + /// partitionColumnName that will be used for proceeding range + /// partitioning. 
Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "partitionLowerBound")] + public object PartitionLowerBound { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OracleSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OracleSource.cs index cc29ade4062a..34b73c2d741f 100644 --- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OracleSource.cs +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/OracleSource.cs @@ -46,11 +46,18 @@ public OracleSource() /// Query timeout. Type: string (or /// Expression with resultType string), pattern: /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). - public OracleSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object oracleReaderQuery = default(object), object queryTimeout = default(object)) + /// The partition mechanism that will be + /// used for Oracle read in parallel. Possible values include: 'None', + /// 'PhysicalPartitionsOfTable', 'DynamicRange' + /// The settings that will be leveraged + /// for Oracle source partitioning. + public OracleSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object oracleReaderQuery = default(object), object queryTimeout = default(object), string partitionOption = default(string), OraclePartitionSettings partitionSettings = default(OraclePartitionSettings)) : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) { OracleReaderQuery = oracleReaderQuery; QueryTimeout = queryTimeout; + PartitionOption = partitionOption; + PartitionSettings = partitionSettings; CustomInit(); } @@ -74,5 +81,20 @@ public OracleSource() [JsonProperty(PropertyName = "queryTimeout")] public object QueryTimeout { get; set; } + /// + /// Gets or sets the partition mechanism that will be used for Oracle + /// read in parallel. Possible values include: 'None', + /// 'PhysicalPartitionsOfTable', 'DynamicRange' + /// + [JsonProperty(PropertyName = "partitionOption")] + public string PartitionOption { get; set; } + + /// + /// Gets or sets the settings that will be leveraged for Oracle source + /// partitioning. + /// + [JsonProperty(PropertyName = "partitionSettings")] + public OraclePartitionSettings PartitionSettings { get; set; } + } } diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/PostgreSqlSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/PostgreSqlSource.cs new file mode 100644 index 000000000000..e8d01f8b1af3 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/PostgreSqlSource.cs @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
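Oracle adds physical-partition reads alongside dynamic range; a sketch with assumed partition names:

var oracleSource = new OracleSource
{
    PartitionOption = OraclePartitionOption.PhysicalPartitionsOfTable,
    PartitionSettings = new OraclePartitionSettings(
        partitionNames: new[] { "SALES_Q1", "SALES_Q2" })  // physical partitions of the table
};
// For OraclePartitionOption.DynamicRange, supply partitionColumnName plus
// partitionUpperBound/partitionLowerBound instead, as with Netezza above.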
+// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// A copy activity source for PostgreSQL databases. + /// + public partial class PostgreSqlSource : CopySource + { + /// + /// Initializes a new instance of the PostgreSqlSource class. + /// + public PostgreSqlSource() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the PostgreSqlSource class. + /// + /// Unmatched properties from the + /// message are deserialized this collection + /// Source retry count. Type: integer + /// (or Expression with resultType integer). + /// Source retry wait. Type: string (or + /// Expression with resultType string), pattern: + /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])). + /// The maximum concurrent + /// connection count for the source data store. Type: integer (or + /// Expression with resultType integer). + /// Database query. Type: string (or Expression + /// with resultType string). + public PostgreSqlSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object)) + : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) + { + Query = query; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets database query. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "query")] + public object Query { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/PostgreSqlTableDataset.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/PostgreSqlTableDataset.cs new file mode 100644 index 000000000000..eaf05eb41919 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/PostgreSqlTableDataset.cs @@ -0,0 +1,86 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Microsoft.Rest; + using Microsoft.Rest.Serialization; + using Newtonsoft.Json; + using System.Collections; + using System.Collections.Generic; + using System.Linq; + + /// + /// The PostgreSQL table dataset. + /// + [Newtonsoft.Json.JsonObject("PostgreSqlTable")] + [Rest.Serialization.JsonTransformation] + public partial class PostgreSqlTableDataset : Dataset + { + /// + /// Initializes a new instance of the PostgreSqlTableDataset class. + /// + public PostgreSqlTableDataset() + { + LinkedServiceName = new LinkedServiceReference(); + CustomInit(); + } + + /// + /// Initializes a new instance of the PostgreSqlTableDataset class. + /// + /// Linked service reference. + /// Unmatched properties from the + /// message are deserialized this collection + /// Dataset description. + /// Columns that define the structure of the + /// dataset. Type: array (or Expression with resultType array), + /// itemType: DatasetDataElement. 
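The new PostgreSqlSource pairs naturally with the AzurePostgreSqlSink added earlier in this change for on-premises-to-Azure copies; an illustrative wiring in which both dataset names are assumptions:

var migrate = new CopyActivity(
    name: "PostgresToAzurePostgres",
    source: new PostgreSqlSource { Query = "SELECT * FROM public.orders" },
    sink: new AzurePostgreSqlSink())
{
    Inputs  = new List<DatasetReference> { new DatasetReference("OnPremOrders") },
    Outputs = new List<DatasetReference> { new DatasetReference("AzureOrders") }
};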
+ /// Columns that define the physical type schema + /// of the dataset. Type: array (or Expression with resultType array), + /// itemType: DatasetSchemaDataElement. + /// Parameters for dataset. + /// List of tags that can be used for + /// describing the Dataset. + /// The folder that this Dataset is in. If not + /// specified, Dataset will appear at the root level. + /// The PostgreSQL table name. Type: string (or + /// Expression with resultType string). + public PostgreSqlTableDataset(LinkedServiceReference linkedServiceName, IDictionary additionalProperties = default(IDictionary), string description = default(string), object structure = default(object), object schema = default(object), IDictionary parameters = default(IDictionary), IList annotations = default(IList), DatasetFolder folder = default(DatasetFolder), object tableName = default(object)) + : base(linkedServiceName, additionalProperties, description, structure, schema, parameters, annotations, folder) + { + TableName = tableName; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets the PostgreSQL table name. Type: string (or Expression + /// with resultType string). + /// + [JsonProperty(PropertyName = "typeProperties.tableName")] + public object TableName { get; set; } + + /// + /// Validate the object. + /// + /// + /// Thrown if validation fails + /// + public override void Validate() + { + base.Validate(); + } + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTablePartitionOption.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTablePartitionOption.cs new file mode 100644 index 000000000000..184057d28197 --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTablePartitionOption.cs @@ -0,0 +1,26 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + + /// + /// Defines values for SapTablePartitionOption. + /// + public static class SapTablePartitionOption + { + public const string None = "None"; + public const string PartitionOnInt = "PartitionOnInt"; + public const string PartitionOnCalendarYear = "PartitionOnCalendarYear"; + public const string PartitionOnCalendarMonth = "PartitionOnCalendarMonth"; + public const string PartitionOnCalendarDate = "PartitionOnCalendarDate"; + public const string PartitionOnTime = "PartitionOnTime"; + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTablePartitionSettings.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTablePartitionSettings.cs new file mode 100644 index 000000000000..2876a9248c0d --- /dev/null +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTablePartitionSettings.cs @@ -0,0 +1,92 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. +// + +namespace Microsoft.Azure.Management.DataFactory.Models +{ + using Newtonsoft.Json; + using System.Linq; + + /// + /// The settings that will be leveraged for SAP table source partitioning. + /// + public partial class SapTablePartitionSettings + { + /// + /// Initializes a new instance of the SapTablePartitionSettings class. + /// + public SapTablePartitionSettings() + { + CustomInit(); + } + + /// + /// Initializes a new instance of the SapTablePartitionSettings class. + /// + /// The name of the column that will + /// be used for proceeding range partitioning. Type: string (or + /// Expression with resultType string). + /// The maximum value of column + /// specified in partitionColumnName that will be used for proceeding + /// range partitioning. Type: string (or Expression with resultType + /// string). + /// The minimum value of column + /// specified in partitionColumnName that will be used for proceeding + /// range partitioning. Type: string (or Expression with resultType + /// string). + /// The maximum value of partitions + /// the table will be split into. Type: integer (or Expression with + /// resultType string). + public SapTablePartitionSettings(object partitionColumnName = default(object), object partitionUpperBound = default(object), object partitionLowerBound = default(object), object maxPartitionsNumber = default(object)) + { + PartitionColumnName = partitionColumnName; + PartitionUpperBound = partitionUpperBound; + PartitionLowerBound = partitionLowerBound; + MaxPartitionsNumber = maxPartitionsNumber; + CustomInit(); + } + + /// + /// An initialization method that performs custom operations like setting defaults + /// + partial void CustomInit(); + + /// + /// Gets or sets the name of the column that will be used for + /// proceeding range partitioning. Type: string (or Expression with + /// resultType string). + /// + [JsonProperty(PropertyName = "partitionColumnName")] + public object PartitionColumnName { get; set; } + + /// + /// Gets or sets the maximum value of column specified in + /// partitionColumnName that will be used for proceeding range + /// partitioning. Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "partitionUpperBound")] + public object PartitionUpperBound { get; set; } + + /// + /// Gets or sets the minimum value of column specified in + /// partitionColumnName that will be used for proceeding range + /// partitioning. Type: string (or Expression with resultType string). + /// + [JsonProperty(PropertyName = "partitionLowerBound")] + public object PartitionLowerBound { get; set; } + + /// + /// Gets or sets the maximum value of partitions the table will be + /// split into. Type: integer (or Expression with resultType string). 
+ /// + [JsonProperty(PropertyName = "maxPartitionsNumber")] + public object MaxPartitionsNumber { get; set; } + + } +} diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTableSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTableSource.cs index 6c5cc0423fc8..a09a9c5f21d3 100644 --- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTableSource.cs +++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapTableSource.cs @@ -57,7 +57,14 @@ public SapTableSource() /// Specifies the custom /// RFC function module that will be used to read data from SAP Table. /// Type: string (or Expression with resultType string). - public SapTableSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object rowCount = default(object), object rowSkips = default(object), object rfcTableFields = default(object), object rfcTableOptions = default(object), object batchSize = default(object), object customRfcReadTableFunctionModule = default(object)) + /// The partition mechanism that will be + /// used for SAP table read in parallel. Possible values include: + /// 'None', 'PartitionOnInt', 'PartitionOnCalendarYear', + /// 'PartitionOnCalendarMonth', 'PartitionOnCalendarDate', + /// 'PartitionOnTime' + /// The settings that will be leveraged + /// for SAP table source partitioning. + public SapTableSource(IDictionary additionalProperties = default(IDictionary), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object rowCount = default(object), object rowSkips = default(object), object rfcTableFields = default(object), object rfcTableOptions = default(object), object batchSize = default(object), object customRfcReadTableFunctionModule = default(object), string partitionOption = default(string), SapTablePartitionSettings partitionSettings = default(SapTablePartitionSettings)) : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections) { RowCount = rowCount; @@ -66,6 +73,8 @@ public SapTableSource() RfcTableOptions = rfcTableOptions; BatchSize = batchSize; CustomRfcReadTableFunctionModule = customRfcReadTableFunctionModule; + PartitionOption = partitionOption; + PartitionSettings = partitionSettings; CustomInit(); } @@ -120,5 +129,22 @@ public SapTableSource() [JsonProperty(PropertyName = "customRfcReadTableFunctionModule")] public object CustomRfcReadTableFunctionModule { get; set; } + /// + /// Gets or sets the partition mechanism that will be used for SAP + /// table read in parallel. Possible values include: 'None', + /// 'PartitionOnInt', 'PartitionOnCalendarYear', + /// 'PartitionOnCalendarMonth', 'PartitionOnCalendarDate', + /// 'PartitionOnTime' + /// + [JsonProperty(PropertyName = "partitionOption")] + public string PartitionOption { get; set; } + + /// + /// Gets or sets the settings that will be leveraged for SAP table + /// source partitioning. 
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SybaseSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SybaseSource.cs
new file mode 100644
index 000000000000..e09535de814a
--- /dev/null
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SybaseSource.cs
@@ -0,0 +1,66 @@
+// <auto-generated>
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for
+// license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+// </auto-generated>
+
+namespace Microsoft.Azure.Management.DataFactory.Models
+{
+    using Newtonsoft.Json;
+    using System.Collections;
+    using System.Collections.Generic;
+    using System.Linq;
+
+    /// <summary>
+    /// A copy activity source for Sybase databases.
+    /// </summary>
+    public partial class SybaseSource : CopySource
+    {
+        /// <summary>
+        /// Initializes a new instance of the SybaseSource class.
+        /// </summary>
+        public SybaseSource()
+        {
+            CustomInit();
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the SybaseSource class.
+        /// </summary>
+        /// <param name="additionalProperties">Unmatched properties from the
+        /// message are deserialized this collection</param>
+        /// <param name="sourceRetryCount">Source retry count. Type: integer
+        /// (or Expression with resultType integer).</param>
+        /// <param name="sourceRetryWait">Source retry wait. Type: string (or
+        /// Expression with resultType string), pattern:
+        /// ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).</param>
+        /// <param name="maxConcurrentConnections">The maximum concurrent
+        /// connection count for the source data store. Type: integer (or
+        /// Expression with resultType integer).</param>
+        /// <param name="query">Database query. Type: string (or Expression
+        /// with resultType string).</param>
+        public SybaseSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object query = default(object))
+            : base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections)
+        {
+            Query = query;
+            CustomInit();
+        }
+
+        /// <summary>
+        /// An initialization method that performs custom operations like setting defaults
+        /// </summary>
+        partial void CustomInit();
+
+        /// <summary>
+        /// Gets or sets database query. Type: string (or Expression with
+        /// resultType string).
+        /// </summary>
+        [JsonProperty(PropertyName = "query")]
+        public object Query { get; set; }
+
+    }
+}
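SybaseSource only adds Query on top of the common CopySource knobs, so it plugs into a copy activity like every other relational source in this change. A minimal sketch, assuming the existing generated CopyActivity, DatasetReference, and BlobSink models; the names below are placeholders taken from the SybaseSourcePipeline JSON sample further down:

using Microsoft.Azure.Management.DataFactory.Models;

// Sketch: a copy activity reading from Sybase into blob storage.
var activity = new CopyActivity
{
    Name = "SybaseToBlobCopyActivity",
    Inputs = new[] { new DatasetReference { ReferenceName = "DA_Input" } },
    Outputs = new[] { new DatasetReference { ReferenceName = "DA_Output" } },
    Source = new SybaseSource { Query = "select * from faketable" },
    Sink = new BlobSink { WriteBatchSize = 1000000, WriteBatchTimeout = "01:00:00" }
};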
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs
index afb3bebe6aa9..140179d40a90 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs
@@ -36,5 +36,16 @@ public static IEnumerable<Tuple<string, string, string>> ApiInfo_DataFactoryMana
            }.AsEnumerable();
          }
       }
+      // BEGIN: Code Generation Metadata Section
+      public static readonly String AutoRestVersion = "latest";
+      public static readonly String AutoRestBootStrapperVersion = "autorest@2.0.4283";
+      public static readonly String AutoRestCmdExecuted = "cmd.exe /c autorest.cmd https://github.com/Azure/azure-rest-api-specs/blob/master/specification/datafactory/resource-manager/readme.md --csharp --version=latest --reflect-api-versions --tag=package-2018-06 --csharp-sdks-folder=D:\\Repos\\azure-sdk-for-net\\sdk";
+      public static readonly String GithubForkName = "Azure";
+      public static readonly String GithubBranchName = "master";
+      public static readonly String GithubCommidId = "7936730ee4e746dd7af1a0a97b20ba4779a2a35c";
+      public static readonly String CodeGenerationErrors = "";
+      public static readonly String GithubRepoName = "azure-rest-api-specs";
+      // END: Code Generation Metadata Section
   }
 }
+
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Microsoft.Azure.Management.DataFactory.csproj b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Microsoft.Azure.Management.DataFactory.csproj
index 54546b7ab60f..eff4f6e63331 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Microsoft.Azure.Management.DataFactory.csproj
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Microsoft.Azure.Management.DataFactory.csproj
@@ -5,15 +5,15 @@
     <PackageId>Microsoft.Azure.Management.DataFactory</PackageId>
     <Description>Azure Data Factory V2 is the data integration platform that goes beyond Azure Data Factory V1's orchestration and batch-processing of time-series data, with a general purpose app model supporting modern data warehousing patterns and scenarios, lift-and-shift SSIS, and data-driven SaaS applications. Compose and manage reliable and secure data integration workflows at scale. Use native ADF data connectors and Integration Runtimes to move and transform cloud and on-premises data that can be unstructured, semi-structured, and structured with Hadoop, Azure Data Lake, Spark, SQL Server, Cosmos DB and many other data platforms.</Description>
-    <Version>4.1.0</Version>
+    <Version>4.1.1</Version>
     <AssemblyName>Microsoft.Azure.Management.DataFactory</AssemblyName>
     <PackageTags>Microsoft Azure resource management;Data Factory;ADF;</PackageTags>
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Properties/AssemblyInfo.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Properties/AssemblyInfo.cs
index a510e37c1599..4c15e1ab57a9 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Properties/AssemblyInfo.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Properties/AssemblyInfo.cs
@@ -7,7 +7,7 @@
 [assembly: AssemblyTitle("Microsoft Azure Data Factory Management Library")]
 [assembly: AssemblyDescription("Provides management functionality for Microsoft Azure Data Factory Resources.")]
 [assembly: AssemblyVersion("4.1.0.0")]
-[assembly: AssemblyFileVersion("4.1.0.0")]
+[assembly: AssemblyFileVersion("4.1.1.0")]
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Microsoft")]
 [assembly: AssemblyProduct("Microsoft Azure .NET SDK")]
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/changelog.md b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/changelog.md
index 0e44cf042348..6d4289248454 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/changelog.md
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/changelog.md
@@ -3,6 +3,14 @@
 ## Current version
 ### Feature Additions
 
+## Version 4.1.1
+### Feature Additions
+- Add PostgreSql Sink
+- Add partition settings for the Oracle and SapTable sources
+- Add dedicated sources and datasets for several relational stores, including ODBC and MySql
+- Add the Azure Data Explorer Command activity, which lets users execute Kusto control commands
+- Update the PostgreSql dataset to use a dedicated AzurePostgreSqlTableDatasetTypeProperties
+
 ## Version 4.1.0
 ### Feature Additions
 ### Breaking Changes
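The changelog entries above are exercised by the JSON samples that follow. As a model-level illustration of the new PostgreSQL sink: a minimal sketch, assuming the generated AzurePostgreSqlSink surface introduced earlier in this diff (preCopyScript plus the batching knobs inherited from CopySink); the script text is invented for the example.

using Microsoft.Azure.Management.DataFactory.Models;

// Sketch: write into Azure Database for PostgreSQL, running a
// clean-up script before the copy starts.
var sink = new AzurePostgreSqlSink
{
    PreCopyScript = "TRUNCATE TABLE staging.events", // hypothetical script
    WriteBatchSize = 1000000,
    WriteBatchTimeout = "01:00:00"
};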
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/DatasetJsonSamples.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/DatasetJsonSamples.cs
index 2abd2ba812c5..92069d6365c1 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/DatasetJsonSamples.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/DatasetJsonSamples.cs
@@ -1157,5 +1157,122 @@ public class DatasetJsonSamples : JsonSampleCollection<DatasetJsonSamples>
 }
 }
 ";
+
+        [JsonSample]
+        public const string AzurePostgreSqlTable = @"
+{
+    name: ""AzurePostgreSqlTable"",
+    properties:
+    {
+        type: ""AzurePostgreSqlTable"",
+        linkedServiceName:
+        {
+            referenceName : ""ls"",
+            type : ""LinkedServiceReference""
+        },
+        typeProperties:
+        {
+            tableName: ""$EncryptedString$MyEncryptedTableName""
+        }
+    }
+}
+";
+
+        [JsonSample]
+        public const string MySqlTable = @"
+{
+    name: ""MySqlTable"",
+    properties:
+    {
+        type: ""MySqlTable"",
+        linkedServiceName:
+        {
+            referenceName : ""ls"",
+            type : ""LinkedServiceReference""
+        },
+        typeProperties:
+        {
+            tableName: ""$EncryptedString$MyEncryptedTableName""
+        }
+    }
+}
+";
+
+        [JsonSample]
+        public const string AzurePostgreSqlTableAndSchema = @"
+{
+    name: ""AzurePostgreSqlTable"",
+    properties:
+    {
+        type: ""AzurePostgreSqlTable"",
+        linkedServiceName:
+        {
+            referenceName : ""ls"",
+            type : ""LinkedServiceReference""
+        },
+        typeProperties:
+        {
+            table: ""$EncryptedString$MyEncryptedTableName"",
+            schema: ""$EncryptedString$MyEncryptedSchemaName""
+        }
+    }
+}
+";
+
+        [JsonSample]
+        public const string OdbcTable = @"
+{
+    name: ""OdbcTable"",
+    properties:
+    {
+        type: ""OdbcTable"",
+        linkedServiceName:
+        {
+            referenceName : ""ls"",
+            type : ""LinkedServiceReference""
+        },
+        typeProperties:
+        {
+            tableName: ""$EncryptedString$MyEncryptedTableName""
+        }
+    }
+}
+";
+
+        [JsonSample]
+        public const string AzureDataExplorerTable = @"
+{
+    name: ""AzureDataExplorerTable"",
+    properties:
+    {
+        type: ""AzureDataExplorerTable"",
+        linkedServiceName:
+        {
+            referenceName : ""ls"",
+            type : ""LinkedServiceReference""
+        }
+    }
+}
+";
+
+        [JsonSample]
+        public const string AzureDataExplorerWithTablePropertyTable = @"
+{
+    name: ""AzureDataExplorerTable"",
+    properties:
+    {
+        type: ""AzureDataExplorerTable"",
+        typeProperties:
+        {
+            table: ""myTable""
+        },
+        linkedServiceName:
+        {
+            referenceName : ""ls"",
+            type : ""LinkedServiceReference""
+        }
+    }
+}
+";
     }
 }
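The two AzureDataExplorerTable samples above differ only in whether typeProperties.table is present. Assuming the dataset model generated alongside these samples is AzureDataExplorerTableDataset with an optional Table property (the model file itself is not shown in this diff), the equivalent in code would look roughly like:

using Microsoft.Azure.Management.DataFactory.Models;

// Sketch: the linked service name "ls" matches the placeholder in the samples.
var dataset = new AzureDataExplorerTableDataset
{
    LinkedServiceName = new LinkedServiceReference { ReferenceName = "ls" },
    Table = "myTable" // omit to target the database-level dataset, as in the first sample
};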
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/LinkedServiceJsonSamples.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/LinkedServiceJsonSamples.cs
index c0070f04809c..0f08f7e561ed 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/LinkedServiceJsonSamples.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/tests/JsonSamples/LinkedServiceJsonSamples.cs
@@ -2228,5 +2228,24 @@ public class LinkedServiceJsonSamples : JsonSampleCollection<LinkedServiceJsonSamples>
         ]
     }
 }";
+
+        [JsonSample(version: "Copy")]
+        public const string CopySapTableWithPartitionToAdls = @"
+{
+    name: ""MyPipelineName"",
+    properties:
+    {
+        description : ""Copy from SAP Table to Azure Data Lake Store"",
+        activities:
+        [
+            {
+                type: ""Copy"",
+                name: ""TestActivity"",
+                description: ""Test activity description"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""SapTableSource"",
+                        rowCount: 3,
+                        partitionOption: ""PartitionOnCalendarDate"",
+                        partitionSettings:
+                        {
+                            ""partitionColumnName"": ""fakeColumn"",
+                            ""partitionUpperBound"": ""20190405"",
+                            ""partitionLowerBound"": ""20170809"",
+                            ""maxPartitionsNumber"": 3
+                        }
+                    },
+                    sink:
+                    {
+                        type: ""AzureDataLakeStoreSink"",
+                        copyBehavior: ""FlattenHierarchy""
+                    }
+                },
+                inputs:
+                [
+                    {
+                        referenceName: ""InputSapTable"", type: ""DatasetReference""
+                    }
+                ],
+                outputs:
+                [
+                    {
+                        referenceName: ""OutputAdlsDA"", type: ""DatasetReference""
+                    }
+                ],
+                linkedServiceName: { referenceName: ""MyLinkedServiceName"", type: ""LinkedServiceReference"" },
+                policy:
+                {
+                    retry: 3,
+                    timeout: ""00:00:05"",
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string Db2SourcePipeline = @"
+{
+    name: ""DataPipeline_Db2Sample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""Db2ToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""Db2Source"",
+                        query: ""select * from faketable""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string AzurePostgreSqlSinkPipeline = @"
+{
+    name: ""DataPipeline_PostgreSqlSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""Db2ToPostgreSqlCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""Db2Source"",
+                        query: ""select * from faketable""
+                    },
+                    sink:
+                    {
+                        type: ""AzurePostgreSqlSink"",
+                        preCopyScript: ""fake script""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string OraclePartitionSourcePipeline = @"
+{
+    name: ""DataPipeline_OraclePartitionSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""OraclePartitionSourceToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""OracleSource"",
+                        partitionOption: ""DynamicRange""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string NetezzaPartitionSourcePipeline = @"
+{
+    name: ""DataPipeline_NetezzaPartitionSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""NetezzaPartitionSourceToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""NetezzaSource"",
+                        partitionOption: ""DataSlice""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string ODataSourcePipeline = @"
+{
+    name: ""DataPipeline_ODataSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""ODataToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""ODataSource"",
+                        query: ""$top=1""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string SybaseSourcePipeline = @"
+{
+    name: ""DataPipeline_SybaseSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""SybaseToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""SybaseSource"",
+                        query: ""select * from faketable""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string MySqlSourcePipeline = @"
+{
+    name: ""DataPipeline_MySqlSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""MySqlToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""MySqlSource"",
+                        query: ""select * from faketable""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string OdbcSourcePipeline = @"
+{
+    name: ""DataPipeline_OdbcSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""OdbcToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""OdbcSource"",
+                        query: ""select * from faketable""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string AmazonRedshiftSourcePipeline = @"
+{
+    name: ""DataPipeline_AmazonRedshiftSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""AmazonRedshiftToBlobCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""AmazonRedshiftSource"",
+                        query: ""select * from faketable""
+                    },
+                    sink:
+                    {
+                        type: ""BlobSink"",
+                        writeBatchSize: 1000000,
+                        writeBatchTimeout: ""01:00:00""
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample(version: "Copy")]
+        public const string AzureDataExplorerPipeline = @"
+{
+    name: ""DataPipeline_AzureDataExplorerSample"",
+    properties:
+    {
+        activities:
+        [
+            {
+                name: ""MyAzureDataExplorerCopyActivity"",
+                inputs: [ {referenceName: ""DA_Input"", type: ""DatasetReference""} ],
+                outputs: [ {referenceName: ""DA_Output"", type: ""DatasetReference""} ],
+                type: ""Copy"",
+                typeProperties:
+                {
+                    source:
+                    {
+                        type: ""AzureDataExplorerSource"",
+                        query: ""CustomLogEvent | top 10 by TIMESTAMP | project TIMESTAMP, Tenant, EventId, ActivityId"",
+                        noTruncation: false,
+                        queryTimeout: ""00:00:15""
+                    },
+                    sink:
+                    {
+                        type: ""AzureDataExplorerSink"",
+                        ingestionMappingName: ""MappingName"",
+                        ingestionMappingAsJson: ""Mapping"",
+                        flushImmediately: true
+                    }
+                },
+                policy:
+                {
+                    retry: 2,
+                    timeout: ""01:00:00""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample]
+        public const string AzureDataExplorerCommandActivityPipeline = @"
+{
+    name: ""MyKustoActivityPipeline"",
+    properties: {
+        activities: [
+            {
+                name: ""MyKustoActivity"",
+                type: ""AzureDataExplorerCommand"",
+                typeProperties: {
+                    command: ""TestTable1 | take 10""
+                }
+            }
+        ]
+    }
+}
+";
+
+        [JsonSample]
+        public const string AzureDataExplorerCommandActivityWithTimeoutPipeline = @"
+{
+    name: ""MyKustoActivityPipeline"",
+    properties: {
+        activities: [
+            {
+                name: ""MyKustoActivity"",
+                type: ""AzureDataExplorerCommand"",
+                typeProperties: {
+                    command: ""TestTable1 | take 10"",
+                    commandTimeout: ""00:10:00""
+                }
+            }
+        ]
+    }
+}
+";
     }
 }