diff --git a/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopy.xml b/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopy.xml
index 461ece2b2a..5e8f58d414 100644
--- a/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopy.xml
+++ b/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopy.xml
@@ -235,6 +235,35 @@ This code is provided to demonstrate the syntax for using **SqlBulkCopy** only.
]]>
+
+
+ Clears the cached destination table metadata when using the
+          <see cref="F:Microsoft.Data.SqlClient.SqlBulkCopyOptions.CacheMetadata" />
+ option.
+
+
+
+ Call this method when you know the destination table schema
+ has changed and you want to force the next
+ WriteToServer operation to refresh the metadata
+ from the server.
+
+
+ The cache is automatically invalidated when the
+          <see cref="P:Microsoft.Data.SqlClient.SqlBulkCopy.DestinationTableName" />
+ property is changed to a different table name.
+
+
+ The cache is not automatically invalidated when the
+ connection context changes. Call this method if the
+ underlying
+          <see cref="T:Microsoft.Data.SqlClient.SqlConnection" />
+ changes database (for example, via
+ )
+ or reconnects to a different server due to failover.
+
+
+
Enables or disables a object to stream data from an object
diff --git a/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopyOptions.xml b/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopyOptions.xml
index a4ac472666..c49537e4d1 100644
--- a/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopyOptions.xml
+++ b/doc/snippets/Microsoft.Data.SqlClient/SqlBulkCopyOptions.xml
@@ -68,5 +68,46 @@ To see how the option changes the way the bulk load works, run the sample with t
When specified, each batch of the bulk-copy operation will occur within a transaction. If you indicate this option and also provide a object to the constructor, an occurs.
+
+
+
+ When specified, CacheMetadata caches destination table
+ metadata after the first bulk copy operation, allowing
+ subsequent operations to the same table to skip the metadata
+ discovery query. This can improve performance when performing
+ multiple bulk copy operations to the same destination table.
+
+
+ Warning: Use this option only when you are certain the
+ destination table schema will not change between bulk copy
+ operations. If the table schema changes (columns added,
+ removed, or modified), using cached metadata may result in
+ data corruption, failed operations, or unexpected behavior.
+ Call
+          <see cref="M:Microsoft.Data.SqlClient.SqlBulkCopy.ClearCachedMetadata" />
+ to clear the cache if the schema changes.
+
+
+ The cache is automatically invalidated when
+          <see cref="P:Microsoft.Data.SqlClient.SqlBulkCopy.DestinationTableName" />
+ is changed to a different table. Changing
+          <see cref="P:Microsoft.Data.SqlClient.SqlBulkCopy.ColumnMappings" />
+ between operations does not require cache invalidation
+ because the cached metadata describes only the destination
+ table schema, not the source-to-destination column mapping.
+
+
+ The cache is not automatically invalidated when the
+ connection context changes. If the underlying
+          <see cref="T:Microsoft.Data.SqlClient.SqlConnection" />
+ changes database (for example, via
+ )
+ or reconnects to a different server due to failover, callers
+ should call
+          <see cref="M:Microsoft.Data.SqlClient.SqlBulkCopy.ClearCachedMetadata" />
+ to ensure the metadata is refreshed.
+
+
+
diff --git a/src/Microsoft.Data.SqlClient/ref/Microsoft.Data.SqlClient.cs b/src/Microsoft.Data.SqlClient/ref/Microsoft.Data.SqlClient.cs
index 9e820a5224..69a29c5b02 100644
--- a/src/Microsoft.Data.SqlClient/ref/Microsoft.Data.SqlClient.cs
+++ b/src/Microsoft.Data.SqlClient/ref/Microsoft.Data.SqlClient.cs
@@ -212,6 +212,8 @@ public SqlBulkCopy(string connectionString, Microsoft.Data.SqlClient.SqlBulkCopy
public event Microsoft.Data.SqlClient.SqlRowsCopiedEventHandler SqlRowsCopied { add { } remove { } }
///
public void Close() { }
+ ///
+ public void ClearCachedMetadata() { }
///
void System.IDisposable.Dispose() { }
///
@@ -343,6 +345,8 @@ public enum SqlBulkCopyOptions
{
///
AllowEncryptedValueModifications = 64,
+ ///
+ CacheMetadata = 128,
///
CheckConstraints = 2,
///
diff --git a/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopy.cs b/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopy.cs
index 39e4f570c7..cbd74f73d6 100644
--- a/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopy.cs
+++ b/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopy.cs
@@ -235,6 +235,13 @@ private int RowNumber
private SourceColumnMetadata[] _currentRowMetadata;
+        // Metadata caching state for the CacheMetadata option: holds the initial-query result set for reuse across WriteToServer calls.
+ internal BulkCopySimpleResultSet CachedMetadata { get; private set; }
+ // Per-operation clone of the destination table metadata, used when CacheMetadata is
+ // enabled so that column-pruning in AnalyzeTargetAndCreateUpdateBulkCommand does not
+ // mutate the cached BulkCopySimpleResultSet.
+ private _SqlMetaDataSet _operationMetaData;
+
#if DEBUG
internal static bool s_setAlwaysTaskOnWrite; //when set and in DEBUG mode, TdsParser::WriteBulkCopyValue will always return a task
internal static bool SetAlwaysTaskOnWrite
@@ -353,6 +360,12 @@ public string DestinationTableName
{
throw ADP.ArgumentOutOfRange(nameof(DestinationTableName));
}
+ else if (string.Equals(_destinationTableName, value, StringComparison.Ordinal))
+ {
+ return;
+ }
+
+ CachedMetadata = null;
_destinationTableName = value;
}
}
@@ -497,6 +510,14 @@ IF EXISTS (SELECT TOP 1 * FROM sys.all_columns WHERE [object_id] = OBJECT_ID('sy
// We need to have a _parser.RunAsync to make it real async.
private Task CreateAndExecuteInitialQueryAsync(out BulkCopySimpleResultSet result)
{
+ // Check if we have valid cached metadata for the current destination table
+ if (CachedMetadata != null)
+ {
+ SqlClientEventSource.Log.TryTraceEvent("SqlBulkCopy.CreateAndExecuteInitialQueryAsync | Info | Using cached metadata for table '{0}'", _destinationTableName);
+ result = CachedMetadata;
+ return null;
+ }
+
string TDSCommand = CreateInitialQuery();
SqlClientEventSource.Log.TryTraceEvent("SqlBulkCopy.CreateAndExecuteInitialQueryAsync | Info | Initial Query: '{0}'", TDSCommand);
SqlClientEventSource.Log.TryCorrelationTraceEvent("SqlBulkCopy.CreateAndExecuteInitialQueryAsync | Info | Correlation | Object Id {0}, Activity Id {1}", ObjectID, ActivityCorrelator.Current);
@@ -506,6 +527,7 @@ private Task CreateAndExecuteInitialQueryAsync(out Bulk
{
result = new BulkCopySimpleResultSet();
RunParser(result);
+ CacheMetadataIfEnabled(result);
return null;
}
else
@@ -523,17 +545,31 @@ private Task CreateAndExecuteInitialQueryAsync(out Bulk
{
var internalResult = new BulkCopySimpleResultSet();
RunParserReliably(internalResult);
+ CacheMetadataIfEnabled(internalResult);
return internalResult;
}
}, TaskScheduler.Default);
}
}
+ private void CacheMetadataIfEnabled(BulkCopySimpleResultSet result)
+ {
+ if (IsCopyOption(SqlBulkCopyOptions.CacheMetadata))
+ {
+ CachedMetadata = result;
+ SqlClientEventSource.Log.TryTraceEvent("SqlBulkCopy.CacheMetadataIfEnabled | Info | Cached metadata for table '{0}'", _destinationTableName);
+ }
+ }
+
// Matches associated columns with metadata from initial query.
// Builds and executes the update bulk command.
- private string AnalyzeTargetAndCreateUpdateBulkCommand(BulkCopySimpleResultSet internalResults)
+ // metaDataSet is passed in by the caller so that when CacheMetadata is enabled, the
+ // caller can supply a clone, allowing this method to null-prune unmatched/rejected
+ // columns freely without mutating the shared cache.
+ private string AnalyzeTargetAndCreateUpdateBulkCommand(BulkCopySimpleResultSet internalResults, _SqlMetaDataSet metaDataSet)
{
Debug.Assert(internalResults != null, "Where are the results from the initial query?");
+ Debug.Assert(metaDataSet != null, "metaDataSet must not be null");
StringBuilder updateBulkCommandText = new StringBuilder();
@@ -577,8 +613,9 @@ private string AnalyzeTargetAndCreateUpdateBulkCommand(BulkCopySimpleResultSet i
// the next column in the command text.
bool appendComma = false;
- // Loop over the metadata for each result column.
- _SqlMetaDataSet metaDataSet = internalResults[MetaDataResultId].MetaData;
+ // Loop over the metadata for each result column, null-pruning unmatched/rejected
+ // columns. metaDataSet is safe to mutate here — see the call site for clone logic.
+ _operationMetaData = metaDataSet;
_sortedColumnMappings = new List<_ColumnMapping>(metaDataSet.Length);
for (int i = 0; i < metaDataSet.Length; i++)
{
@@ -875,11 +912,18 @@ private void WriteMetaData(BulkCopySimpleResultSet internalResults)
{
_stateObj.SetTimeoutSeconds(BulkCopyTimeout);
- _SqlMetaDataSet metadataCollection = internalResults[MetaDataResultId].MetaData;
+ _SqlMetaDataSet metadataCollection = _operationMetaData ?? internalResults[MetaDataResultId].MetaData;
_stateObj._outputMessageType = TdsEnums.MT_BULK;
_parser.WriteBulkCopyMetaData(metadataCollection, _sortedColumnMappings.Count, _stateObj);
}
+ ///
+ public void ClearCachedMetadata()
+ {
+ CachedMetadata = null;
+ SqlClientEventSource.Log.TryTraceEvent("SqlBulkCopy.ClearCachedMetadata | Info | Metadata cache cleared");
+ }
+
// Terminates the bulk copy operation.
// Must be called at the end of the bulk copy session.
///
@@ -900,6 +944,8 @@ private void Dispose(bool disposing)
// Dispose dependent objects
_columnMappings = null;
_parser = null;
+ CachedMetadata = null;
+ _operationMetaData = null;
try
{
// Just in case there is a lingering transaction (which there shouldn't be)
@@ -2667,7 +2713,7 @@ private Task CopyBatchesAsyncContinued(BulkCopySimpleResultSet internalResults,
// Load encryption keys now (if needed)
_parser.LoadColumnEncryptionKeys(
- internalResults[MetaDataResultId].MetaData,
+ _operationMetaData ?? internalResults[MetaDataResultId].MetaData,
_connection);
Task task = CopyRowsAsync(0, _savedBatchSize, cts); // This is copying 1 batch of rows and setting _hasMoreRowToCopy = true/false.
@@ -2840,7 +2886,14 @@ private void WriteToServerInternalRestContinuedAsync(BulkCopySimpleResultSet int
try
{
- updateBulkCommandText = AnalyzeTargetAndCreateUpdateBulkCommand(internalResults);
+ // When CacheMetadata is enabled, internalResults IS the cached result set (see
+ // CreateAndExecuteInitialQueryAsync). Clone the metadata set so that
+ // AnalyzeTargetAndCreateUpdateBulkCommand can null-prune unmatched/rejected
+ // columns without mutating the cache across WriteToServer calls.
+ _SqlMetaDataSet metaDataSet = CachedMetadata != null
+ ? internalResults[MetaDataResultId].MetaData.Clone()
+ : internalResults[MetaDataResultId].MetaData;
+ updateBulkCommandText = AnalyzeTargetAndCreateUpdateBulkCommand(internalResults, metaDataSet);
if (_sortedColumnMappings.Count != 0)
{
@@ -3194,6 +3247,7 @@ private void ResetWriteToServerGlobalVariables()
_dataTableSource = null;
_dbDataReaderRowSource = null;
_isAsyncBulkCopy = false;
+ _operationMetaData = null;
_rowEnumerator = null;
_rowSource = null;
_rowSourceType = ValueSourceType.Unspecified;
diff --git a/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopyOptions.cs b/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopyOptions.cs
index 5454e609aa..5adbb21101 100644
--- a/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopyOptions.cs
+++ b/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/SqlBulkCopyOptions.cs
@@ -33,6 +33,9 @@ public enum SqlBulkCopyOptions
///
AllowEncryptedValueModifications = 1 << 6,
+
+ ///
+ CacheMetadata = 1 << 7,
}
}
diff --git a/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/TdsParserHelperClasses.cs b/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/TdsParserHelperClasses.cs
index f189030d1e..2d44aea7da 100644
--- a/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/TdsParserHelperClasses.cs
+++ b/src/Microsoft.Data.SqlClient/src/Microsoft/Data/SqlClient/TdsParserHelperClasses.cs
@@ -352,6 +352,7 @@ private _SqlMetaDataSet(_SqlMetaDataSet original)
_visibleColumnMap = original._visibleColumnMap;
dbColumnSchema = original.dbColumnSchema;
schemaTable = original.schemaTable;
+ cekTable = original.cekTable;
if (original._metaDataArray == null)
{
@@ -577,6 +578,10 @@ internal virtual void CopyFrom(SqlMetaDataPriv original)
xmlSchemaCollection = new SqlMetaDataXmlSchemaCollection();
xmlSchemaCollection.CopyFrom(original.xmlSchemaCollection);
}
+
+ this.isEncrypted = original.isEncrypted;
+ this.baseTI = original.baseTI;
+ this.cipherMD = original.cipherMD;
}
}
diff --git a/src/Microsoft.Data.SqlClient/tests/ManualTests/Microsoft.Data.SqlClient.ManualTests.csproj b/src/Microsoft.Data.SqlClient/tests/ManualTests/Microsoft.Data.SqlClient.ManualTests.csproj
index 44bb79cbc9..efc10126aa 100644
--- a/src/Microsoft.Data.SqlClient/tests/ManualTests/Microsoft.Data.SqlClient.ManualTests.csproj
+++ b/src/Microsoft.Data.SqlClient/tests/ManualTests/Microsoft.Data.SqlClient.ManualTests.csproj
@@ -111,6 +111,7 @@
+    <Compile Include="SQL\SqlBulkCopyTest\CacheMetadata.cs" />
diff --git a/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/CacheMetadata.cs b/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/CacheMetadata.cs
new file mode 100644
index 0000000000..36f37fc2e7
--- /dev/null
+++ b/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/CacheMetadata.cs
@@ -0,0 +1,519 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Data;
+using System.Threading.Tasks;
+using Xunit;
+
+namespace Microsoft.Data.SqlClient.ManualTesting.Tests
+{
+ public class CacheMetadata
+ {
+ private static readonly string sourceTable = "employees";
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(20), col3 nvarchar(10))";
+ private static readonly string sourceQueryTemplate = "select top 5 EmployeeID, LastName, FirstName from {0}";
+
+ // Test that CacheMetadata option works for multiple WriteToServer calls to the same table.
+ public static void Test(string srcConstr, string dstConstr, string dstTable)
+ {
+ string sourceQuery = string.Format(sourceQueryTemplate, sourceTable);
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+ bulkcopy.DestinationTableName = dstTable;
+
+ // First WriteToServer: metadata is queried and cached.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 5);
+
+ // Second WriteToServer: should reuse cached metadata.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 10);
+
+ // Third WriteToServer: should still reuse cached metadata.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 15);
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataInvalidate
+ {
+ private static readonly string sourceTable = "employees";
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(20), col3 nvarchar(10))";
+ private static readonly string sourceQueryTemplate = "select top 5 EmployeeID, LastName, FirstName from {0}";
+
+ // Test that ClearCachedMetadata forces a fresh metadata query.
+ public static void Test(string srcConstr, string dstConstr, string dstTable)
+ {
+ string sourceQuery = string.Format(sourceQueryTemplate, sourceTable);
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+ bulkcopy.DestinationTableName = dstTable;
+
+ // First WriteToServer: metadata is queried and cached.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 5);
+
+ // Invalidate the cache and write again: should still succeed after re-querying metadata.
+ bulkcopy.ClearCachedMetadata();
+
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 10);
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataDestinationChange
+ {
+ private static readonly string sourceTable = "employees";
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(20), col3 nvarchar(10))";
+ private static readonly string sourceQueryTemplate = "select top 5 EmployeeID, LastName, FirstName from {0}";
+
+ // Test that changing DestinationTableName invalidates the cache and works correctly with a new table.
+ public static void Test(string srcConstr, string dstConstr, string dstTable1, string dstTable2)
+ {
+ string sourceQuery = string.Format(sourceQueryTemplate, sourceTable);
+ string initialQuery1 = string.Format(initialQueryTemplate, dstTable1);
+ string initialQuery2 = string.Format(initialQueryTemplate, dstTable2);
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery1);
+ Helpers.TryExecute(dstCmd, initialQuery2);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+
+ // Write to first table.
+ bulkcopy.DestinationTableName = dstTable1;
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable1, 3, 5);
+
+ // Change destination table: cache should be invalidated automatically.
+ bulkcopy.DestinationTableName = dstTable2;
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable2, 3, 5);
+ }
+ finally
+ {
+ Helpers.TryDropTable(dstConstr, dstTable1);
+ Helpers.TryDropTable(dstConstr, dstTable2);
+ }
+ }
+ }
+
+ public class CacheMetadataWithoutFlag
+ {
+ private static readonly string sourceTable = "employees";
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(20), col3 nvarchar(10))";
+ private static readonly string sourceQueryTemplate = "select top 5 EmployeeID, LastName, FirstName from {0}";
+
+ // Test that without the CacheMetadata flag, multiple writes still work (no regression).
+ public static void Test(string srcConstr, string dstConstr, string dstTable)
+ {
+ string sourceQuery = string.Format(sourceQueryTemplate, sourceTable);
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn);
+ bulkcopy.DestinationTableName = dstTable;
+
+ // First WriteToServer without CacheMetadata.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 5);
+
+ // Second WriteToServer without CacheMetadata.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ srcConn.Open();
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = srcCmd.ExecuteReader();
+ bulkcopy.WriteToServer(reader);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 10);
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataWithDataTable
+ {
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(50), col3 nvarchar(50))";
+
+ // Test that CacheMetadata works with DataTable source as well as IDataReader.
+ public static void Test(string dstConstr, string dstTable)
+ {
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using DataTable sourceData = new();
+ sourceData.Columns.Add("col1", typeof(int));
+ sourceData.Columns.Add("col2", typeof(string));
+ sourceData.Columns.Add("col3", typeof(string));
+ sourceData.Rows.Add(1, "Alice", "Smith");
+ sourceData.Rows.Add(2, "Bob", "Jones");
+ sourceData.Rows.Add(3, "Charlie", "Brown");
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+ bulkcopy.DestinationTableName = dstTable;
+
+ // First WriteToServer with DataTable: metadata is queried and cached.
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 3);
+
+ // Second WriteToServer with DataTable: should reuse cached metadata.
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 6);
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataColumnMappingsChange
+ {
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(50), col3 nvarchar(50))";
+
+ // Test that changing ColumnMappings between WriteToServer calls works correctly with CacheMetadata.
+ // The cached metadata describes the destination table schema, not the column mappings,
+ // so modifying mappings between calls should work without cache invalidation.
+ public static void Test(string dstConstr, string dstTable)
+ {
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using DataTable sourceData = new DataTable();
+ sourceData.Columns.Add("id", typeof(int));
+ sourceData.Columns.Add("firstName", typeof(string));
+ sourceData.Columns.Add("lastName", typeof(string));
+ sourceData.Rows.Add(1, "Alice", "Smith");
+ sourceData.Rows.Add(2, "Bob", "Jones");
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+ bulkcopy.DestinationTableName = dstTable;
+
+                // First write: map id -> col1, firstName -> col2, lastName -> col3.
+ bulkcopy.ColumnMappings.Add("id", "col1");
+ bulkcopy.ColumnMappings.Add("firstName", "col2");
+ bulkcopy.ColumnMappings.Add("lastName", "col3");
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 2);
+
+ // Verify first mapping: col2 should contain firstName values.
+ using (SqlCommand verifyCmd = new("select col2 from " + dstTable + " where col1 = 1", dstConn))
+ {
+ object result = verifyCmd.ExecuteScalar();
+ Assert.Equal("Alice", result);
+ }
+
+ // Change mappings: swap col2 and col3 targets.
+ bulkcopy.ColumnMappings.Clear();
+ bulkcopy.ColumnMappings.Add("id", "col1");
+ bulkcopy.ColumnMappings.Add("firstName", "col3");
+ bulkcopy.ColumnMappings.Add("lastName", "col2");
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 4);
+
+ // Verify second mapping: col3 should now contain firstName values for the new rows.
+ using (SqlCommand verifyCmd = new("select col3 from " + dstTable + " where col1 = 1 order by col2", dstConn))
+ {
+ using SqlDataReader reader = verifyCmd.ExecuteReader();
+
+ // First row (from first write): col3 = "Smith" (lastName).
+ Assert.True(reader.Read());
+ Assert.Equal("Smith", reader.GetString(0));
+
+ // Second row (from second write): col3 = "Alice" (firstName).
+ Assert.True(reader.Read());
+ Assert.Equal("Alice", reader.GetString(0));
+ }
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataColumnSubsetChange
+ {
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(50), col3 nvarchar(50))";
+
+ // Test that mapping a subset of columns on the first call, then all columns on the
+ // second call, works correctly with CacheMetadata. This verifies that null-pruning of
+ // unmatched columns in AnalyzeTargetAndCreateUpdateBulkCommand does not mutate the
+ // cached metadata, which would cause a NullReferenceException on the second call.
+ public static void Test(string dstConstr, string dstTable)
+ {
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using DataTable sourceData = new DataTable();
+ sourceData.Columns.Add("id", typeof(int));
+ sourceData.Columns.Add("firstName", typeof(string));
+ sourceData.Columns.Add("lastName", typeof(string));
+ sourceData.Rows.Add(1, "Alice", "Smith");
+ sourceData.Rows.Add(2, "Bob", "Jones");
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+ bulkcopy.DestinationTableName = dstTable;
+
+ // First write: map only col1 and col2 (col3 is unmatched and will be pruned).
+ bulkcopy.ColumnMappings.Add("id", "col1");
+ bulkcopy.ColumnMappings.Add("firstName", "col2");
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 2);
+
+ // Second write: map all three columns including col3.
+ // Without the clone fix, this would fail because col3 metadata was
+ // permanently nulled in the cache during the first call.
+ bulkcopy.ColumnMappings.Clear();
+ bulkcopy.ColumnMappings.Add("id", "col1");
+ bulkcopy.ColumnMappings.Add("firstName", "col2");
+ bulkcopy.ColumnMappings.Add("lastName", "col3");
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 4);
+
+ // Verify col3 has the expected data from the second write.
+ using (SqlCommand verifyCmd = new("select col3 from " + dstTable + " where col1 = 1 and col3 is not null", dstConn))
+ {
+ object result = verifyCmd.ExecuteScalar();
+ Assert.Equal("Smith", result);
+ }
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataAsync
+ {
+ private static readonly string sourceTable = "employees";
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(20), col3 nvarchar(10))";
+ private static readonly string sourceQueryTemplate = "select top 5 EmployeeID, LastName, FirstName from {0}";
+
+ // Test that CacheMetadata works correctly with WriteToServerAsync.
+ public static void Test(string srcConstr, string dstConstr, string dstTable)
+ {
+ Task t = TestAsync(srcConstr, dstConstr, dstTable);
+ t.Wait();
+ Assert.True(t.IsCompleted, "Task did not complete! Status: " + t.Status);
+ }
+
+ private static async Task TestAsync(string srcConstr, string dstConstr, string dstTable)
+ {
+ string sourceQuery = string.Format(sourceQueryTemplate, sourceTable);
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata, null);
+ bulkcopy.DestinationTableName = dstTable;
+
+ // First WriteToServerAsync: metadata is queried and cached.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ await srcConn.OpenAsync().ConfigureAwait(false);
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = await srcCmd.ExecuteReaderAsync().ConfigureAwait(false);
+ await bulkcopy.WriteToServerAsync(reader).ConfigureAwait(false);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 5);
+
+ // Second WriteToServerAsync: should reuse cached metadata.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ await srcConn.OpenAsync().ConfigureAwait(false);
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = await srcCmd.ExecuteReaderAsync().ConfigureAwait(false);
+ await bulkcopy.WriteToServerAsync(reader).ConfigureAwait(false);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 10);
+
+ // Third WriteToServerAsync: should still reuse cached metadata.
+ using (SqlConnection srcConn = new(srcConstr))
+ {
+ await srcConn.OpenAsync().ConfigureAwait(false);
+ using SqlCommand srcCmd = new(sourceQuery, srcConn);
+ using IDataReader reader = await srcCmd.ExecuteReaderAsync().ConfigureAwait(false);
+ await bulkcopy.WriteToServerAsync(reader).ConfigureAwait(false);
+ }
+ Helpers.VerifyResults(dstConn, dstTable, 3, 15);
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+
+ public class CacheMetadataCombinedWithKeepNulls
+ {
+ private static readonly string initialQueryTemplate = "create table {0} (col1 int, col2 nvarchar(50) default 'DefaultVal', col3 nvarchar(50))";
+
+        // Test that CacheMetadata works correctly when combined with another option (KeepNulls).
+ public static void Test(string dstConstr, string dstTable)
+ {
+ string initialQuery = string.Format(initialQueryTemplate, dstTable);
+
+ using DataTable sourceData = new();
+ sourceData.Columns.Add("col1", typeof(int));
+ sourceData.Columns.Add("col2", typeof(string));
+ sourceData.Columns.Add("col3", typeof(string));
+ sourceData.Rows.Add(1, DBNull.Value, "Smith");
+ sourceData.Rows.Add(2, "Bob", DBNull.Value);
+
+ using SqlConnection dstConn = new(dstConstr);
+ using SqlCommand dstCmd = dstConn.CreateCommand();
+ dstConn.Open();
+
+ try
+ {
+ Helpers.TryExecute(dstCmd, initialQuery);
+
+ using SqlBulkCopy bulkcopy = new(dstConn, SqlBulkCopyOptions.CacheMetadata | SqlBulkCopyOptions.KeepNulls, null);
+ bulkcopy.DestinationTableName = dstTable;
+ bulkcopy.ColumnMappings.Add("col1", "col1");
+ bulkcopy.ColumnMappings.Add("col2", "col2");
+ bulkcopy.ColumnMappings.Add("col3", "col3");
+
+ // First write with CacheMetadata | KeepNulls.
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 2);
+
+ // Verify nulls were kept (not replaced by default values).
+ using SqlCommand verifyCmd = new("select col2 from " + dstTable + " where col1 = 1", dstConn);
+ object result = verifyCmd.ExecuteScalar();
+ Assert.Equal(System.DBNull.Value, result);
+
+ // Second write should reuse cached metadata.
+ bulkcopy.WriteToServer(sourceData);
+ Helpers.VerifyResults(dstConn, dstTable, 3, 4);
+ }
+ finally
+ {
+ Helpers.TryExecute(dstCmd, "drop table " + dstTable);
+ }
+ }
+ }
+}
diff --git a/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/SqlBulkCopyTest.cs b/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/SqlBulkCopyTest.cs
index 4672adb242..90a4e5cbf3 100644
--- a/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/SqlBulkCopyTest.cs
+++ b/src/Microsoft.Data.SqlClient/tests/ManualTests/SQL/SqlBulkCopyTest/SqlBulkCopyTest.cs
@@ -308,5 +308,59 @@ public void OrderHintIdentityColumnTest()
{
OrderHintIdentityColumn.Test(_connStr, AddGuid("SqlBulkCopyTest_OrderHintIdentityColumn"));
}
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataTest()
+        {
+            // Same connection string is passed for both source and destination.
+            CacheMetadata.Test(_connStr, _connStr, AddGuid("SqlBulkCopyTest_CacheMetadata"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataInvalidateTest()
+        {
+            // Delegates to the manual-test scenario covering explicit cache invalidation.
+            CacheMetadataInvalidate.Test(_connStr, _connStr, AddGuid("SqlBulkCopyTest_CacheMetadataInvalidate"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataDestinationChangeTest()
+        {
+            // Two distinct destination tables are created so the scenario can switch DestinationTableName mid-run.
+            CacheMetadataDestinationChange.Test(_connStr, _connStr, AddGuid("SqlBulkCopyTest_CacheMetadataDstChange0"), AddGuid("SqlBulkCopyTest_CacheMetadataDstChange1"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataWithoutFlagTest()
+        {
+            // Baseline scenario: bulk copy without the CacheMetadata option set.
+            CacheMetadataWithoutFlag.Test(_connStr, _connStr, AddGuid("SqlBulkCopyTest_CacheMetadataNoFlag"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataWithDataTableTest()
+        {
+            // Single connection string variant: the scenario sources rows from a DataTable.
+            CacheMetadataWithDataTable.Test(_connStr, AddGuid("SqlBulkCopyTest_CacheMetadataDT"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataColumnMappingsChangeTest()
+        {
+            // Covers changing ColumnMappings between writes while metadata is cached.
+            CacheMetadataColumnMappingsChange.Test(_connStr, AddGuid("SqlBulkCopyTest_CacheMetadataColMap"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataColumnSubsetChangeTest()
+        {
+            // Covers mapping a different subset of destination columns on a later write.
+            CacheMetadataColumnSubsetChange.Test(_connStr, AddGuid("SqlBulkCopyTest_CacheMetadataSubset"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataCombinedWithKeepNullsTest()
+        {
+            // Covers combining CacheMetadata with the KeepNulls option.
+            CacheMetadataCombinedWithKeepNulls.Test(_connStr, AddGuid("SqlBulkCopyTest_CacheMetadataKeepNulls"));
+        }
+
+        [ConditionalFact(typeof(DataTestUtility), nameof(DataTestUtility.AreConnStringsSetup), nameof(DataTestUtility.IsNotAzureServer))]
+        public void CacheMetadataAsyncTest()
+        {
+            // Async variant of the basic CacheMetadata scenario.
+            CacheMetadataAsync.Test(_connStr, _connStr, AddGuid("SqlBulkCopyTest_CacheMetadataAsync"));
+        }
}
}
diff --git a/src/Microsoft.Data.SqlClient/tests/UnitTests/Microsoft/Data/SqlClient/SqlBulkCopyCacheMetadataTest.cs b/src/Microsoft.Data.SqlClient/tests/UnitTests/Microsoft/Data/SqlClient/SqlBulkCopyCacheMetadataTest.cs
new file mode 100644
index 0000000000..90aa4a8f5e
--- /dev/null
+++ b/src/Microsoft.Data.SqlClient/tests/UnitTests/Microsoft/Data/SqlClient/SqlBulkCopyCacheMetadataTest.cs
@@ -0,0 +1,134 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Linq;
+using System.Reflection;
+using Xunit;
+
+namespace Microsoft.Data.SqlClient.UnitTests
+{
+    /// <summary>
+    /// Unit tests for the <see cref="SqlBulkCopyOptions.CacheMetadata"/> flag and the
+    /// <see cref="SqlBulkCopy.ClearCachedMetadata"/> cache-invalidation API.
+    /// </summary>
+    public class SqlBulkCopyCacheMetadataTest
+    {
+        // Sets the non-public CachedMetadata property via reflection, simulating the
+        // state left behind by a completed WriteToServer call without needing a server.
+        private static void SetCachedMetadata(SqlBulkCopy bulkCopy, BulkCopySimpleResultSet value)
+        {
+            typeof(SqlBulkCopy)
+                .GetProperty("CachedMetadata", BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Public)!
+                .SetValue(bulkCopy, value);
+        }
+
+        [Fact]
+        public void CacheMetadata_FlagValue_IsCorrect()
+        {
+            // 1 << 7 is the next free bit after AllowEncryptedValueModifications (1 << 6).
+            Assert.Equal(1 << 7, (int)SqlBulkCopyOptions.CacheMetadata);
+        }
+
+        [Fact]
+        public void CacheMetadata_CanBeCombinedWithOtherOptions()
+        {
+            SqlBulkCopyOptions combined =
+                SqlBulkCopyOptions.CacheMetadata |
+                SqlBulkCopyOptions.KeepIdentity |
+                SqlBulkCopyOptions.TableLock;
+
+            // Each flag must survive the bitwise combination unchanged.
+            Assert.True((combined & SqlBulkCopyOptions.CacheMetadata) == SqlBulkCopyOptions.CacheMetadata);
+            Assert.True((combined & SqlBulkCopyOptions.KeepIdentity) == SqlBulkCopyOptions.KeepIdentity);
+            Assert.True((combined & SqlBulkCopyOptions.TableLock) == SqlBulkCopyOptions.TableLock);
+        }
+
+        [Fact]
+        public void SqlBulkCopyOptions_AllValues_AreUnique()
+        {
+            // Fixed: Cast() was missing its generic type argument and did not compile.
+            int[] values = Enum.GetValues(typeof(SqlBulkCopyOptions))
+                .Cast<int>()
+                .ToArray();
+
+            // No two members of the [Flags]-style enum may share a value.
+            Assert.Equal(values.Length, values.Distinct().Count());
+        }
+
+        [Fact]
+        public void ClearCachedMetadata_ClearsCachedMetadata()
+        {
+            using SqlBulkCopy bulkCopy = new(new SqlConnection(), SqlBulkCopyOptions.CacheMetadata, null);
+
+            SetCachedMetadata(bulkCopy, new BulkCopySimpleResultSet());
+
+            bulkCopy.ClearCachedMetadata();
+
+            Assert.Null(bulkCopy.CachedMetadata);
+        }
+
+        [Fact]
+        public void ClearCachedMetadata_CanBeCalledMultipleTimes()
+        {
+            using SqlBulkCopy bulkCopy = new(new SqlConnection(), SqlBulkCopyOptions.CacheMetadata, null);
+
+            SetCachedMetadata(bulkCopy, new BulkCopySimpleResultSet());
+
+            // Repeated clears must be idempotent and must not throw.
+            bulkCopy.ClearCachedMetadata();
+            bulkCopy.ClearCachedMetadata();
+            bulkCopy.ClearCachedMetadata();
+
+            Assert.Null(bulkCopy.CachedMetadata);
+        }
+
+        [Fact]
+        public void ClearCachedMetadata_WhenNoCachedData_DoesNotThrow()
+        {
+            using SqlBulkCopy bulkCopy = new(new SqlConnection(), SqlBulkCopyOptions.CacheMetadata, null);
+
+            Assert.Null(bulkCopy.CachedMetadata);
+
+            // Clearing an already-empty cache is a no-op.
+            bulkCopy.ClearCachedMetadata();
+
+            Assert.Null(bulkCopy.CachedMetadata);
+        }
+
+        [Fact]
+        public void ClearCachedMetadata_WithoutCacheMetadataOption_ClearsCachedMetadata()
+        {
+            // ClearCachedMetadata must work even when the CacheMetadata option was never set.
+            using SqlBulkCopy bulkCopy = new(new SqlConnection(), SqlBulkCopyOptions.Default, null);
+
+            SetCachedMetadata(bulkCopy, new BulkCopySimpleResultSet());
+
+            bulkCopy.ClearCachedMetadata();
+
+            Assert.Null(bulkCopy.CachedMetadata);
+        }
+
+        [Fact]
+        public void DestinationTableName_Change_ClearsCachedMetadata()
+        {
+            using SqlBulkCopy bulkCopy = new(new SqlConnection(), SqlBulkCopyOptions.CacheMetadata, null);
+
+            // Set backing field first so the setter sees a matching name
+            bulkCopy.DestinationTableName = "Table1";
+
+            // Simulate cached state after a WriteToServer call
+            SetCachedMetadata(bulkCopy, new BulkCopySimpleResultSet());
+
+            // Setting the same name should NOT clear the cache
+            bulkCopy.DestinationTableName = "Table1";
+            Assert.NotNull(bulkCopy.CachedMetadata);
+
+            // Changing to a different table should clear the cache
+            bulkCopy.DestinationTableName = "Table2";
+            Assert.Null(bulkCopy.CachedMetadata);
+        }
+
+        [Fact]
+        public void Constructor_WithCacheMetadataOption_Succeeds()
+        {
+            using SqlBulkCopy bulkCopy = new(new SqlConnection(), SqlBulkCopyOptions.CacheMetadata, null);
+            Assert.NotNull(bulkCopy);
+        }
+
+        [Fact]
+        public void Constructor_WithCacheMetadataAndConnectionString_Succeeds()
+        {
+            // Constructing from a connection string must not require an open connection.
+            using SqlBulkCopy bulkCopy = new("Server=localhost", SqlBulkCopyOptions.CacheMetadata);
+            Assert.NotNull(bulkCopy);
+        }
+    }
+}
diff --git a/src/Microsoft.Data.SqlClient/tests/UnitTests/Microsoft/Data/SqlClient/SqlMetaDataSetTest.cs b/src/Microsoft.Data.SqlClient/tests/UnitTests/Microsoft/Data/SqlClient/SqlMetaDataSetTest.cs
new file mode 100644
index 0000000000..37ce70d2c8
--- /dev/null
+++ b/src/Microsoft.Data.SqlClient/tests/UnitTests/Microsoft/Data/SqlClient/SqlMetaDataSetTest.cs
@@ -0,0 +1,269 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using Xunit;
+
+namespace Microsoft.Data.SqlClient.UnitTests
+{
+    /// <summary>
+    /// Tests that verify _SqlMetaDataSet.Clone() produces independent copies,
+    /// ensuring that null-pruning of unmatched columns in AnalyzeTargetAndCreateUpdateBulkCommand
+    /// does not corrupt the cached metadata when CacheMetadata is enabled.
+    /// </summary>
+    public class SqlMetaDataSetTest
+    {
+        [Fact]
+        public void SqlMetaDataSet_Clone_ProducesIndependentCopy()
+        {
+            // Arrange: create a metadata set with 3 columns simulating a destination table
+            _SqlMetaDataSet original = new _SqlMetaDataSet(3);
+            original[0].column = "col1";
+            original[1].column = "col2";
+            original[2].column = "col3";
+
+            // Act: clone and then null out an entry in the clone (simulating column pruning)
+            _SqlMetaDataSet clone = original.Clone();
+            clone[2] = null;
+
+            // Assert: the original is not affected by the mutation of the clone
+            Assert.NotNull(original[0]);
+            Assert.NotNull(original[1]);
+            Assert.NotNull(original[2]);
+            Assert.Equal("col1", original[0].column);
+            Assert.Equal("col2", original[1].column);
+            Assert.Equal("col3", original[2].column);
+        }
+
+        [Fact]
+        public void SqlMetaDataSet_Clone_NullingMultipleEntries_OriginalRetainsAll()
+        {
+            // Arrange: simulate a table with 4 columns
+            _SqlMetaDataSet original = new _SqlMetaDataSet(4);
+            original[0].column = "id";
+            original[1].column = "name";
+            original[2].column = "email";
+            original[3].column = "phone";
+
+            // Act: clone and null out entries 1 and 3 (simulating mapping only id and email)
+            _SqlMetaDataSet clone = original.Clone();
+            clone[1] = null;
+            clone[3] = null;
+
+            // Assert: clone has nulls where expected
+            Assert.NotNull(clone[0]);
+            Assert.Null(clone[1]);
+            Assert.NotNull(clone[2]);
+            Assert.Null(clone[3]);
+
+            // Assert: original retains all entries
+            for (int i = 0; i < 4; i++)
+            {
+                Assert.NotNull(original[i]);
+            }
+            Assert.Equal("name", original[1].column);
+            Assert.Equal("phone", original[3].column);
+        }
+
+        [Fact]
+        public void SqlMetaDataSet_Clone_RepeatedCloneAndPrune_OriginalSurvives()
+        {
+            // Arrange: simulate the scenario where multiple WriteToServer calls each
+            // clone and prune different subsets of columns
+            _SqlMetaDataSet original = new _SqlMetaDataSet(3);
+            original[0].column = "col1";
+            original[1].column = "col2";
+            original[2].column = "col3";
+
+            // First operation: map only col1 and col2 (prune col3)
+            _SqlMetaDataSet clone1 = original.Clone();
+            clone1[2] = null;
+
+            // Second operation: map only col1 and col3 (prune col2)
+            _SqlMetaDataSet clone2 = original.Clone();
+            clone2[1] = null;
+
+            // Third operation: map all columns (no pruning needed)
+            _SqlMetaDataSet clone3 = original.Clone();
+
+            // Assert: original is fully intact after all operations
+            Assert.NotNull(original[0]);
+            Assert.NotNull(original[1]);
+            Assert.NotNull(original[2]);
+            Assert.Equal("col1", original[0].column);
+            Assert.Equal("col2", original[1].column);
+            Assert.Equal("col3", original[2].column);
+
+            // Assert: each clone reflects its own pruning
+            Assert.Null(clone1[2]);
+            Assert.NotNull(clone1[1]);
+
+            Assert.Null(clone2[1]);
+            Assert.NotNull(clone2[2]);
+
+            Assert.NotNull(clone3[0]);
+            Assert.NotNull(clone3[1]);
+            Assert.NotNull(clone3[2]);
+        }
+
+        [Fact]
+        public void SqlMetaDataSet_Clone_PreservesOrdinals()
+        {
+            // Verify that cloned entries maintain correct ordinal values,
+            // which are used for column matching in AnalyzeTargetAndCreateUpdateBulkCommand
+            _SqlMetaDataSet original = new _SqlMetaDataSet(3);
+            original[0].column = "col1";
+            original[1].column = "col2";
+            original[2].column = "col3";
+
+            _SqlMetaDataSet clone = original.Clone();
+
+            Assert.Equal(original[0].ordinal, clone[0].ordinal);
+            Assert.Equal(original[1].ordinal, clone[1].ordinal);
+            Assert.Equal(original[2].ordinal, clone[2].ordinal);
+        }
+
+        [Fact]
+        public void SqlMetaDataSet_Clone_PreservesCekTable()
+        {
+            // Verify that cloning preserves the CEK table reference, which is needed by
+            // WriteCekTable in TdsParser to send encryption key entries to SQL Server.
+            // Without this, WriteCekTable sees cekTable == null and writes 0 CEK entries.
+            SqlTceCipherInfoTable cekTable = new SqlTceCipherInfoTable(2);
+            cekTable[0] = new SqlTceCipherInfoEntry(ordinal: 0);
+            cekTable[1] = new SqlTceCipherInfoEntry(ordinal: 1);
+
+            _SqlMetaDataSet original = new _SqlMetaDataSet(2, cekTable);
+            original[0].column = "col1";
+            original[1].column = "col2";
+
+            _SqlMetaDataSet clone = original.Clone();
+
+            Assert.NotNull(clone.cekTable);
+            Assert.Same(original.cekTable, clone.cekTable);
+            Assert.Equal(2, clone.cekTable.Size);
+        }
+
+        [Fact]
+        public void SqlMetaData_Clone_PreservesIsEncrypted()
+        {
+            // Verify that cloning a _SqlMetaData entry preserves the isEncrypted flag.
+            // WriteBulkCopyMetaData checks md.isEncrypted to set the TDS IsEncrypted flag
+            // and WriteCryptoMetadata checks it to decide whether to write cipher metadata.
+            // If lost, encrypted columns are sent as plaintext.
+            _SqlMetaDataSet original = new _SqlMetaDataSet(1);
+            original[0].column = "encrypted_col";
+            original[0].isEncrypted = true;
+
+            _SqlMetaDataSet clone = original.Clone();
+
+            Assert.True(clone[0].isEncrypted);
+        }
+
+        [Fact]
+        public void SqlMetaData_Clone_PreservesCipherMetadata()
+        {
+            // Verify that cloning preserves cipherMD, which is needed by
+            // WriteCryptoMetadata (for CekTableOrdinal, CipherAlgorithmId, etc.)
+            // and LoadColumnEncryptionKeys (to decrypt symmetric keys).
+            SqlTceCipherInfoEntry cekEntry = new SqlTceCipherInfoEntry(ordinal: 0);
+            SqlCipherMetadata cipherMD = new SqlCipherMetadata(
+                sqlTceCipherInfoEntry: cekEntry,
+                ordinal: 0,
+                cipherAlgorithmId: 2,
+                cipherAlgorithmName: "AEAD_AES_256_CBC_HMAC_SHA256",
+                encryptionType: 1,
+                normalizationRuleVersion: 1
+            );
+
+            _SqlMetaDataSet original = new _SqlMetaDataSet(1);
+            original[0].column = "encrypted_col";
+            original[0].isEncrypted = true;
+            original[0].cipherMD = cipherMD;
+
+            _SqlMetaDataSet clone = original.Clone();
+
+            Assert.NotNull(clone[0].cipherMD);
+            Assert.Equal(2, clone[0].cipherMD.CipherAlgorithmId);
+            Assert.Equal("AEAD_AES_256_CBC_HMAC_SHA256", clone[0].cipherMD.CipherAlgorithmName);
+            Assert.Equal(1, clone[0].cipherMD.EncryptionType);
+            Assert.Equal(1, clone[0].cipherMD.NormalizationRuleVersion);
+        }
+
+        [Fact]
+        public void SqlMetaData_Clone_PreservesBaseTI()
+        {
+            // Verify that cloning preserves baseTI, which represents the plaintext
+            // TYPE_INFO for encrypted columns. WriteCryptoMetadata calls
+            // WriteTceUserTypeAndTypeInfo(md.baseTI) to send the unencrypted type info.
+            SqlMetaDataPriv baseTI = new SqlMetaDataPriv();
+            baseTI.type = System.Data.SqlDbType.NVarChar;
+            baseTI.length = 100;
+            baseTI.precision = 0;
+            baseTI.scale = 0;
+
+            _SqlMetaDataSet original = new _SqlMetaDataSet(1);
+            original[0].column = "encrypted_col";
+            original[0].isEncrypted = true;
+            original[0].baseTI = baseTI;
+
+            _SqlMetaDataSet clone = original.Clone();
+
+            Assert.NotNull(clone[0].baseTI);
+            Assert.Equal(System.Data.SqlDbType.NVarChar, clone[0].baseTI.type);
+            Assert.Equal(100, clone[0].baseTI.length);
+        }
+
+        [Fact]
+        public void SqlMetaDataSet_Clone_PreservesFullAlwaysEncryptedMetadata()
+        {
+            // End-to-end test: verify that a cloned _SqlMetaDataSet with Always Encrypted
+            // metadata retains all AE fields needed by the bulk copy TDS write path:
+            // cekTable (for WriteCekTable), isEncrypted (for flag writing),
+            // cipherMD (for WriteCryptoMetadata), and baseTI (for WriteTceUserTypeAndTypeInfo).
+            SqlTceCipherInfoEntry cekEntry = new SqlTceCipherInfoEntry(ordinal: 0);
+            SqlTceCipherInfoTable cekTable = new SqlTceCipherInfoTable(1);
+            cekTable[0] = cekEntry;
+
+            SqlCipherMetadata cipherMD = new SqlCipherMetadata(
+                sqlTceCipherInfoEntry: cekEntry,
+                ordinal: 0,
+                cipherAlgorithmId: 2,
+                cipherAlgorithmName: "AEAD_AES_256_CBC_HMAC_SHA256",
+                encryptionType: 1,
+                normalizationRuleVersion: 1
+            );
+
+            SqlMetaDataPriv baseTI = new SqlMetaDataPriv();
+            baseTI.type = System.Data.SqlDbType.Int;
+
+            _SqlMetaDataSet original = new _SqlMetaDataSet(2, cekTable);
+            original[0].column = "id";
+            original[1].column = "secret";
+            original[1].isEncrypted = true;
+            original[1].cipherMD = cipherMD;
+            original[1].baseTI = baseTI;
+
+            // Clone and prune column 0 (simulating mapping only the encrypted column)
+            _SqlMetaDataSet clone = original.Clone();
+            clone[0] = null;
+
+            // The pruning must not affect the encrypted column's metadata
+            Assert.NotNull(clone[1]);
+            Assert.True(clone[1].isEncrypted);
+            Assert.NotNull(clone[1].cipherMD);
+            Assert.NotNull(clone[1].baseTI);
+            Assert.Equal(System.Data.SqlDbType.Int, clone[1].baseTI.type);
+
+            // The cekTable must be preserved on the clone
+            Assert.NotNull(clone.cekTable);
+            Assert.Equal(1, clone.cekTable.Size);
+
+            // The original must remain completely intact
+            Assert.NotNull(original[0]);
+            Assert.NotNull(original[1]);
+            Assert.NotNull(original.cekTable);
+            Assert.True(original[1].isEncrypted);
+        }
+    }
+}
\ No newline at end of file