SQL Server procedure optimization testing pattern improvement - sql-server

I've been doing some SQL Server procedure optimization lately and was looking for a testing pattern (for both timing and results). This is the solution I've come up with so far:
SET NOCOUNT ON;
----------------------------------------------------------------------------------------------------------------
-- Procedures data and performance testing pattern
----------------------------------------------------------------------------------------------------------------
-- Prepare test queries (most likely taken from Logs.ProcedureTraceData on the DATAUK/DATAUS servers)
-- Procedures should insert records into Temporary table, so we can compare their results using EXCEPT
-- If result set columns are fixed (i.e. no Dynamic SQL is used), we can create Temporary tables inside script
-- and insert records in them to do comparison and just TRUNCATE them at the end of the loop.
-- example here: http://stackoverflow.com/a/654418/3680098
-- If there are any data discrepancies or the record counts differ, it will be shown in the TraceLog table
----------------------------------------------------------------------------------------------------------------
-- Create your own TraceLog table to keep records
----------------------------------------------------------------------------------------------------------------
/*
CREATE TABLE Temporary._EB_TraceLog
(
ID INT NOT NULL IDENTITY(1, 1) CONSTRAINT PK_Temporary_EB_TraceLog_ID PRIMARY KEY
, CurrentExecutionTime INT
, TempExecutionTime INT
, CurrentExecutionResultsCount INT
, TempExecutionResultsCount INT
, IsDifferent BIT CONSTRAINT DF_Temporary_EB_TraceLog_IsDifferent DEFAULT 0 NOT NULL
, TimeDiff AS CurrentExecutionTime - TempExecutionTime
, PercentageDiff AS CAST(((CAST(CurrentExecutionTime AS DECIMAL)/ CAST(TempExecutionTime AS DECIMAL)) * 100 - 100) AS DECIMAL(10, 2))
, TextData NVARCHAR(MAX)
);
SELECT *
FROM Temporary._EB_TraceLog;
TRUNCATE TABLE Temporary._EB_TraceLog;
*/
INSERT INTO Temporary._EB_TraceLog (TextData)
SELECT TextData
FROM Temporary._EB_GetData_Timeouts
EXCEPT
SELECT TextData
FROM Temporary._EB_TraceLog;
DECLARE @Counter INT;
SELECT @Counter = MIN(ID)
FROM Temporary._EB_TraceLog
WHERE CurrentExecutionTime IS NULL
OR TempExecutionTime IS NULL
OR CurrentExecutionResultsCount IS NULL
OR TempExecutionResultsCount IS NULL;
WHILE @Counter <= (SELECT MAX(ID) FROM Temporary._EB_TraceLog)
BEGIN
DECLARE @SQLStringCurr NVARCHAR(MAX);
DECLARE @SQLStringTemp NVARCHAR(MAX);
DECLARE @StartTime DATETIME2;
SELECT @SQLStringCurr = REPLACE(TextData, 'dbo.GetData', 'Temporary._EB_GetData_Orig')
, @SQLStringTemp = REPLACE(TextData, 'dbo.GetData', 'Temporary._EB_GetData_Mod')
FROM Temporary._EB_TraceLog
WHERE ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Drop the temporary result tables here, so the time spent dropping them doesn't count towards the SP execution time
----------------------------------------------------------------------------------------------------------------
IF OBJECT_ID(N'Temporary._EB_Test_Orig') IS NOT NULL
DROP TABLE Temporary._EB_Test_Orig;
IF OBJECT_ID(N'Temporary._EB_Test_Mod') IS NOT NULL
DROP TABLE Temporary._EB_Test_Mod;
----------------------------------------------------------------------------------------------------------------
-- Actual testing
----------------------------------------------------------------------------------------------------------------
-- Take time snapshot and execute original procedure, which inserts records into Temporary table
-- When done - measurements will be updated on TraceLog table
----------------------------------------------------------------------------------------------------------------
SELECT @StartTime = CURRENT_TIMESTAMP;
EXECUTE sp_executesql @SQLStringCurr;
UPDATE T
SET T.CurrentExecutionTime = DATEDIFF(MILLISECOND, @StartTime, CURRENT_TIMESTAMP)
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Take time snapshot and execute optimized procedure, which inserts records into Temporary table
-- When done - measurements will be updated on TraceLog table
----------------------------------------------------------------------------------------------------------------
SELECT @StartTime = CURRENT_TIMESTAMP;
EXECUTE sp_executesql @SQLStringTemp;
UPDATE T
SET T.TempExecutionTime = DATEDIFF(MILLISECOND, @StartTime, CURRENT_TIMESTAMP)
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Check if there are any data discrepancies
-- If there are any, set IsDifferent to 1, so we can find the root cause
----------------------------------------------------------------------------------------------------------------
IF EXISTS (SELECT * FROM Temporary._EB_Test_Mod EXCEPT SELECT * FROM Temporary._EB_Test_Orig)
OR EXISTS (SELECT * FROM Temporary._EB_Test_Orig EXCEPT SELECT * FROM Temporary._EB_Test_Mod)
BEGIN
UPDATE T
SET T.IsDifferent = 1
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
END
----------------------------------------------------------------------------------------------------------------
-- Update record counts for each execution
-- We can check whether the record counts differ even though the result sets are the same
-- EXCEPT clause removes duplicates when doing checks
----------------------------------------------------------------------------------------------------------------
UPDATE T
SET T.CurrentExecutionResultsCount = (SELECT COUNT(*) FROM Temporary._EB_Test_Orig)
, T.TempExecutionResultsCount = (SELECT COUNT(*) FROM Temporary._EB_Test_Mod)
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Print iteration number and proceed on next one
----------------------------------------------------------------------------------------------------------------
PRINT @Counter;
SET @Counter += 1;
END
SELECT *
FROM Temporary._EB_TraceLog;
This works quite well so far, but I would like to include IO and TIME statistics in each iteration. Is that possible?
I know I can do it using:
SET STATISTICS IO ON;
SET STATISTICS TIME ON;
But is there a way to grab summed up values and put them in my TraceLog table?
And on top of that, is there anything that doesn't make sense in this piece of code?
Thanks

You can use this query:
SELECT total_elapsed_time
FROM sys.dm_exec_query_stats
WHERE sql_handle IN (SELECT most_recent_sql_handle
FROM sys.dm_exec_connections
CROSS APPLY sys.dm_exec_sql_text(most_recent_sql_handle)
WHERE session_id = @@SPID)
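To fold that into the TraceLog table, you could read the same DMV right after each sp_executesql call inside the loop and store the values on the current row. A minimal sketch, assuming hypothetical CurrentLogicalReads / CurrentElapsedMs columns have been added to Temporary._EB_TraceLog and that the plan is still in cache; note the DMV counters are cumulative per cached plan, so repeated runs would need to work with deltas:
-- Hypothetical columns CurrentLogicalReads BIGINT, CurrentElapsedMs BIGINT added to Temporary._EB_TraceLog
DECLARE @LogicalReads BIGINT, @ElapsedMs BIGINT;
SELECT TOP (1)
@LogicalReads = qs.total_logical_reads
, @ElapsedMs = qs.total_elapsed_time / 1000 -- total_elapsed_time is in microseconds
FROM sys.dm_exec_query_stats AS qs
CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) AS st
WHERE st.text = @SQLStringCurr -- match the batch we just executed
ORDER BY qs.last_execution_time DESC;
UPDATE T
SET T.CurrentLogicalReads = @LogicalReads
, T.CurrentElapsedMs = @ElapsedMs
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;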

Related

Converting SQL Server script to Oracle

I have a script that I created that works fine in SQL Server, but now I need to make it work in Oracle and I am having issues getting the script converted.
Here is my SQL Server Script:
-- run the commented out statement in another query analyzer window to stop the script
-- update ##stopsign set val = 1
set nocount on
--declare variables
declare @morework int
declare @archivecount int
declare @stopsign int
--if working tables exists clear them, if not create and initialize them
if (object_id('tempdb..##stopsign') is null)
create table ##stopsign (val int)
else
delete from ##stopsign
insert into ##stopsign values (0)
if (object_id('tempdb..#tempdins') is null)
create table #tempdins (tempdin varchar(255) not null, processed int null)
else
delete from #tempdins
--initialize #tempdins working table with all the records eligible to be unarchived
--edit the select statement if needed to change the records to be unarchived
insert into #tempdins(tempdin)
select tempdin
from document
where archivestatus = 'C'
and status = 'U'
option (MAXDOP 1)
--initialize variables with current values
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
select @morework = 1
--while there is more to do, the unarchs table has fewer than 1000 records, and the stopsign value is 0, loop
while (@morework >= 1 and @stopsign = 0)
begin
if @archivecount < 1000
begin
-- number to be processed at once
-- change this value if you would like to dump more in to the unarchs table at once
set rowcount 100
update #tempdins
set processed = 0
where processed is null
--reset rowcount
set rowcount 0
--populate the unarchs table with valid values
--this will unarchive at the page (lowest) level
insert into unarchs (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from document
where tempdin in (select tempdin
from #tempdins
where processed = 0)
--update with rowcount to see if finished
select @morework = @@rowcount
--set the tempdins to processed in working table
update #tempdins
set processed = 1
where processed = 0
--get new counts for variables for evaluation
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
else
begin
--get new counts for variables for evaluation
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
end
set nocount off
Here is what I have for ORACLE so far (writing in PL/SQL):
-- run the commented out statement in another query analyzer window to stop the script
-- update ##stopsign set val = 1
--if working tables exists clear them, if not create and initialize them
declare
v_sql LONG;
begin
v_sql:='CREATE GLOBAL TEMPORARY TABLE STOPSIGN;
(
VAL int
)';
execute immediate v_sql;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -955 THEN
NULL; -- suppresses ORA-00955 exception
ELSE
delete from STOPSIGN;
END IF;
END;
/
insert into STOPSIGN values (0);
--if working tables exists clear them, if not create and initialize them
declare
v_sql LONG;
begin
v_sql:='CREATE GLOBAL TEMPORARY TABLE TEMPDINS;
(
tempdin varch(255),
processed int null
)';
execute immediate v_sql;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -955 THEN
NULL; -- suppresses ORA-00955 exception
ELSE
delete from TEMPDINS;
END IF;
END;
/
--initialize #tempdins working table with all the records eligible to be unarchived
--edit the select statement if needed to change the records to be unarchived
insert into TEMPDINS(tempdin)
select * from (select tempdin
from document join packtype on packtype.packagetype=document.packagetype
and archivestatus = 'C' and ADRIVE is not null
and ADRIVE <> DRIVE ) where ROWNUM < 10;
--inialize variables with current values
Declare
archivecount int;
stopsign int;
morework int;
Begin
Select count(*) INTO archivecount from UNARCHS;
Select VAL into stopsign from STOPSIGN;
morework := 1;
END
--while there is more to do, unarchs table has less then 1000 records, and the stopsign value is 0 loop
WHILE morework > 0 and stopsign = 0
LOOP{
begin
if archivecount <1000
begin
-- number to be processed at once
-- change this value if you would like to dump more in to the unarchs table at once
set rowcount 100
update TEMPDINS
set processed = 0
where processed is null
}
--reset rowcount
set rowcount 0
--populate the unarchs table with valid values
--this will unarchive at the page (lowest) level
insert into UNARCHS (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from DOCUMENT
where tempdin in (select tempdin
from TEMPDINS
where processed = 0)
--update with rowcount to see if finished
select morework = select NUM_ROWS into morework from user_tables where table_name = 'UNARCHS'
--set the tempdins to processed in working table
update TEMPDINS
set processed = 1
where processed = 0
--get new counts for variables for evaulation
select archivecount = (select count(*) from unarchs)
select stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
else
begin
--get new counts for variables for evaulation
select archivecount = (select count(*) from unarchs)
select stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
end
End
END IF
END LOOP
Any help would be appreciated. My company has no Oracle resources for me to go to and Google is getting tired of me.
I think you can get rid of everything and just do a single insert statement:
insert into unarchs (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from DOCUMENT
where archivestatus = 'C'
And status = 'U';
I’m assuming you have a different process that updates the rows in the document table so this process doesn’t constantly pick up the same rows?

SQL Stored Procedure with Input parameters with While loop

I have the code below that should insert records into the table, but unfortunately it does not work when multiple records are inserted, updated or deleted. How should I rewrite the code so the procedure loops through all the inserted/deleted records? And I do need to use that stored procedure with input parameters (not just a simple insert into ... select ... from ...).
IF EXISTS (SELECT * FROM MyDB.sys.triggers WHERE object_id = OBJECT_ID(N'[dbo].[MyTable_DEL_UPD_INS]'))
DROP TRIGGER [dbo].[MyTable_DEL_UPD_INS]
GO
CREATE TRIGGER [dbo].[MyTable_DEL_UPD_INS]
ON [MyDB].[dbo].[MyTable]
AFTER DELETE, UPDATE, INSERT
NOT FOR REPLICATION
AS
BEGIN
DECLARE @PKId INT,
@Code VARCHAR(5),
@AuditType VARCHAR(10)
SET @Code = 'TEST'
IF EXISTS (SELECT * FROM deleted d)
AND NOT EXISTS (SELECT * FROM inserted i)
BEGIN
SELECT TOP 1
@PKId = d.[MyTable_PK],
@AuditType = 'DELETE'
FROM
deleted d WITH (NOLOCK)
IF @PKId IS NOT NULL
AND @Code IS NOT NULL
EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
IF EXISTS (SELECT * FROM deleted d)
AND EXISTS (SELECT * FROM inserted i)
BEGIN
SELECT TOP 1
@PKId = d.[MyTable_PK],
@AuditType = 'UPDATE'
FROM
deleted d WITH (NOLOCK)
IF @PKId IS NOT NULL
AND @Code IS NOT NULL
EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
IF NOT EXISTS (SELECT * FROM deleted d)
AND EXISTS (SELECT * FROM inserted i)
BEGIN
SELECT TOP 1
@PKId = d.[MyTable_PK],
@AuditType = 'INSERT'
FROM
deleted d WITH (NOLOCK)
IF @PKId IS NOT NULL
AND @Code IS NOT NULL
EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
END
GO
ALTER TABLE [MyDB].[dbo].[MyTable] ENABLE TRIGGER [MyTable_DEL_UPD_INS]
You should avoid using loops in triggers.
Triggers should be as quick to run as possible, since SQL Server will not return control to whatever statement that fired the trigger until the trigger is completed.
So instead of a loop, you should modify your SP_Audit procedure to work with multiple records instead of a single one.
Usually, this is easily done using a table-valued parameter.
If you could post the SP_Audit as well, we could give you a complete solution.
Since you didn't post it, you can use these guidelines as a start:
First, you create a user defined table type:
CREATE TYPE dbo.Ids AS TABLE
(
Id int NOT NULL PRIMARY KEY
)
GO
Then, you create the procedure to use it:
CREATE PROCEDURE [dbo].[STP_Audit_MultipleRecords]
(
@IDs dbo.Ids readonly,
@Code CHAR(4),
@AuditType CHAR(6)
)
AS
-- Implementation here
GO
Last, your write your trigger like this:
CREATE TRIGGER [dbo].[MyTable_DEL_UPD_INS]
ON [MyDB].[dbo].[MyTable]
AFTER DELETE, UPDATE, INSERT
NOT FOR REPLICATION
AS
BEGIN
DECLARE @HasDeleted bit = 0,
@HasInserted bit = 0,
@AuditType CHAR(6),
@Code CHAR(4)
SET @Code = 'TEST'
DECLARE @IDs as dbo.Ids
IF EXISTS (SELECT * FROM deleted d)
SET @HasDeleted = 1
IF EXISTS (SELECT * FROM inserted i)
SET @HasInserted = 1
IF @HasDeleted = 1
BEGIN
IF @HasInserted = 1
BEGIN
SET @AuditType = 'UPDATE'
END
ELSE
BEGIN
SET @AuditType = 'DELETE'
END
END
ELSE
IF @HasInserted = 1
BEGIN
SET @AuditType = 'INSERT'
END
INSERT INTO @IDs (Id)
SELECT [MyTable_PK]
FROM inserted
UNION
SELECT [MyTable_PK]
FROM deleted
EXEC [dbo].[STP_Audit_MultipleRecords] @IDs, @Code, @AuditType
END
GO
Notes:
The @HasDeleted and @HasInserted variables are there so you only execute each EXISTS query once per trigger execution.
Getting the primary key values from the deleted and inserted table is done using a single union query. Since union eliminates duplicate values, you can write this query just once. If you want to, you can write a different query for each audit type, but then you will have to repeat the same query 3 times (with different tables)
I've changed the data types of your @Code and @AuditType variables to char, since they have a fixed length.
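Since SP_Audit itself wasn't posted, the body below is only a hypothetical sketch of STP_Audit_MultipleRecords, assuming the audit simply writes one row per ID into a dbo.AuditLog table (the table and its columns are assumptions; adapt it to whatever SP_Audit actually does):
CREATE PROCEDURE [dbo].[STP_Audit_MultipleRecords]
(
@IDs dbo.Ids readonly,
@Code CHAR(4),
@AuditType CHAR(6)
)
AS
BEGIN
SET NOCOUNT ON;
-- Hypothetical audit table; one row per affected primary key
INSERT INTO dbo.AuditLog (PKId, Code, AuditType, AuditDate)
SELECT Id, @Code, @AuditType, GETDATE()
FROM @IDs;
END
GO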

SQL Server - OPTION OPTIMIZE FOR - 2 specific IDs

I'm creating a stored procedure that queries a lot of data but the query is specific to 2 customers in one country.
I'm just wondering if it's possible to add multiple CustomerIDs to an OPTIMIZE FOR statement e.g.
option (OPTIMIZE FOR (@CountryId = 122, @CustomerId = 321654, @CustomerId = 78954))
The above example doesn't compile:
A compile-time literal value is specified more than once for the variable "@CustomerId" in one or more OPTIMIZE FOR clauses.
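For reference, OPTIMIZE FOR accepts at most one literal per variable, so something like the following does compile (the literals are just the ones from the question); alternatively, OPTIMIZE FOR UNKNOWN tells the optimizer to use average statistics instead of a specific value:
option (OPTIMIZE FOR (@CountryId = 122, @CustomerId = 321654))
-- or:
option (OPTIMIZE FOR (@CustomerId UNKNOWN))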
One thing you can do is call a different procedure depending on the parameter value, and each procedure will get its own statistics, e.g. in your procedure IF @CustomerId = 321654 EXEC proc_321654 ELSE EXEC proc_78954. Something like this:
CREATE PROCEDURE innerProc_78954
AS
BEGIN
SELECT ...
WHERE CustomerId = 78954
END
GO
CREATE PROCEDURE innerProc_321654
AS
BEGIN
SELECT ...
WHERE CustomerId = 321654
END
GO
CREATE PROCEDURE outerProc
#CustomerId INT
AS
BEGIN
IF #CustomerId = 321654
EXEC innerProc_321654
ELSE
EXEC innerProc_78954
END
GO
So innerProc_78954 and innerProc_321654 are basically duplicates, but each is only ever called with a specific parameter, and thus each has statistics optimized for its own parameter.
Here's a simple example with sample data to demonstrate:
-- Create test data - 10,000 rows for CustomerId 1, 100 rows for CustomerId 2
CREATE TABLE #Orders (CustomerId INT, OrderDate DATETIME)
DECLARE @c1 INT = 0
DECLARE @c2 INT = 0
WHILE @c1 < 100
BEGIN
SET @c2 = 0
WHILE @c2 < 100
BEGIN
INSERT INTO #Orders (CustomerId, OrderDate) VALUES (1, GETDATE() - 100)
SET @c2 += 1
END
INSERT INTO #Orders (CustomerId, OrderDate) VALUES (2, GETDATE() - 100)
SET @c1 += 1
END
GO
-- The procedure optimized for CustomerId = 1
CREATE PROCEDURE #proc1
@CustomerId INT
AS
SELECT * FROM #Orders WHERE CustomerId = @CustomerId
GO
-- The procedure optimized for CustomerId = 2
CREATE PROCEDURE #proc2
@CustomerId INT
AS
SELECT * FROM #Orders WHERE CustomerId = @CustomerId
GO
-- The outer procedure to call from your client application.
CREATE PROCEDURE #procAll
@CustomerId INT
AS
IF @CustomerId = 1
EXEC #proc1 1
ELSE
EXEC #proc2 2
GO
-- Run your outer procedure and verify the actual # of rows matches the
-- estimated # of rows for each parameter.
EXEC #procAll 1
EXEC #procAll 2
-- Now run the inner procedures with the incorrect parameters - note that the
-- estimated # of rows does not match the actual # of rows, demonstrating that
-- statistics were optimized for the other parameter.
EXEC #proc1 2
EXEC #proc2 1

How to chunk updates to SQL Server?

I want to update a table in SQL Server by setting a FLAG column to 1 for all values since the beginning of the year:
TABLE
DATE ID FLAG (more columns...)
2016/01/01 1 0 ...
2016/01/01 2 0 ...
2016/01/02 3 0 ...
2016/01/02 4 0 ...
(etc)
Problem is that this table contains hundreds of millions of records and I've been advised to chunk the updates 100,000 rows at a time to avoid blocking other processes.
I need to remember which rows I update because there are background processes which immediately flip the FLAG back to 0 once they're done processing it.
Does anyone have suggestions on how I can do this?
Each day's worth of data has over a million records, so I can't simply loop using the DATE as a counter. I am thinking of using the ID
Assuming the date column and the ID column are sequential you could do a simple loop. By this I mean that if there is a record id=1 and date=2016-1-1 then record id=2 date=2015-12-31 could not exist. If you are worried about locks/exceptions you should add a transaction in the WHILE block and commit or rollback on failure.
Change the #batchSize to whatever you feel is right after some experimentation.
DECLARE @currentId int, @maxId int, @batchSize int = 10000
SELECT @currentId = MIN(ID), @maxId = MAX(ID) FROM YOURTABLE WHERE DATE >= '2016-01-01'
WHILE @currentId < @maxId
BEGIN
UPDATE YOURTABLE SET FLAG = 1 WHERE ID BETWEEN @currentId AND (@currentId + @batchSize)
SET @currentId = @currentId + @batchSize
END
As the update will never flag the same record twice, I do not see a need to track which records were touched unless you are going to manually stop the process partway through.
You should also ensure that the ID column has an index on it so the retrieval is fast in each update statement.
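If you do want the transaction handling mentioned above, a sketch of the same loop with an explicit transaction per batch (same YOURTABLE/ID/FLAG names as before, rolling back and re-raising on failure) could look like this:
DECLARE @currentId int, @maxId int, @batchSize int = 10000
SELECT @currentId = MIN(ID), @maxId = MAX(ID) FROM YOURTABLE WHERE DATE >= '2016-01-01'
WHILE @currentId < @maxId
BEGIN
BEGIN TRY
BEGIN TRANSACTION
UPDATE YOURTABLE SET FLAG = 1 WHERE ID BETWEEN @currentId AND (@currentId + @batchSize)
COMMIT TRANSACTION
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0
ROLLBACK TRANSACTION
;THROW -- re-raise the error and stop the loop
END CATCH
SET @currentId = @currentId + @batchSize
END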
Looks like a simple question or maybe I'm missing something.
You can create a temp/permanent table to keep track of updated rows.
create table tbl (Id int) -- or a temp table, depending on your case
insert into tbl values (0)
declare @lastId int = (select Id from tbl)
;with cte as (
select top 100000 Id, Flag
from YourMainTable
where Id > @lastId
ORDER BY Id
)
update cte
set Flag = 1
update tbl set Id = @lastId + 100000
You can do this process in a loop (everything except the table creation part); a sketch of that follows.
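A minimal sketch of that loop, assuming YourMainTable has Id and Flag columns and reusing the tbl tracking table from above; it records the highest Id actually flagged rather than assuming the Ids are contiguous:
declare @lastId int, @rows int = 1
declare @done table (Id int)
while @rows > 0
begin
select @lastId = Id from tbl
delete from @done
;with cte as (
select top 100000 Id, Flag
from YourMainTable
where Id > @lastId
ORDER BY Id
)
update cte
set Flag = 1
output inserted.Id into @done (Id)
set @rows = @@ROWCOUNT
if @rows > 0
update tbl set Id = (select max(Id) from @done) -- remember the highest Id actually flagged
end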
create table #tmp_table
(
id int ,
row_number int
)
insert into #tmp_table
(
id,
row_number
)
--logic to load records from the base table
select
bt.id,
row_number() over(order by bt.id) as row_number
from
dbo.base_table bt
where
--your logic to limit the records
declare @batch_size int = 100000;
declare @start_row_number int, @end_row_number int;
select
@start_row_number = min(row_number),
@end_row_number = max(row_number)
from
#tmp_table
while(@start_row_number < @end_row_number)
begin
update top (@batch_size)
bt
set
bt.flag = 1
from
dbo.base_table bt
inner join #tmp_table tt on
tt.Id = bt.Id
where
tt.row_number between @start_row_number and (@start_row_number + @batch_size)
set @start_row_number = @start_row_number + @batch_size
end

Do Inserted Records Always Receive Contiguous Identity Values

Consider the following SQL:
CREATE TABLE Foo
(
ID int IDENTITY(1,1),
Data nvarchar(max)
)
INSERT INTO Foo (Data)
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
DECLARE @LastID int
SET @LastID = SCOPE_IDENTITY()
I would like to know if I can depend on the 1000 rows that I inserted into table Foo having contiguous identity values. In other words, if this SQL block produces a @LastID of 2000, can I know for certain that the ID of the first record I inserted was 1001? I am mainly curious about multiple statements inserting records into table Foo concurrently.
I know that I could add a serializable transaction around my insert statement to ensure the behavior that I want, but do I really need to? I'm worried that introducing a serializable transaction will degrade performance, but if SQL Server won't allow other statements to insert into table Foo while this statement is running, then I don't have to worry about it.
I disagree with the accepted answer. This can easily be tested and disproved by running the following.
Setup
USE tempdb
CREATE TABLE Foo
(
ID int IDENTITY(1,1),
Data nvarchar(max)
)
Connection 1
USE tempdb
SET NOCOUNT ON
WHILE NOT EXISTS(SELECT * FROM master..sysprocesses WHERE context_info = CAST('stop' AS VARBINARY(128) ))
BEGIN
INSERT INTO Foo (Data)
VALUES ('blah')
END
Connection 2
USE tempdb
SET NOCOUNT ON
SET CONTEXT_INFO 0x
DECLARE @Output TABLE(ID INT)
WHILE 1 = 1
BEGIN
/*Clear out table variable from previous loop*/
DELETE FROM @Output
/*Insert 1000 records*/
INSERT INTO Foo (Data)
OUTPUT inserted.ID INTO @Output
SELECT TOP 1000 NEWID()
FROM sys.all_columns
IF EXISTS(SELECT * FROM @Output HAVING MAX(ID) - MIN(ID) <> 999 )
BEGIN
/*Set Context Info so other connection inserting
a single record in a loop terminates itself*/
DECLARE @stop VARBINARY(128)
SET @stop = CAST('stop' AS VARBINARY(128))
SET CONTEXT_INFO @stop
/*Return results for inspection*/
SELECT ID, DENSE_RANK() OVER (ORDER BY Grp) AS ContigSection
FROM
(SELECT ID, ID - ROW_NUMBER() OVER (ORDER BY [ID]) AS Grp
FROM @Output) O
ORDER BY ID
RETURN
END
END
Yes, they will be contiguous because the INSERT is atomic: complete success or full rollback. It is also performed as a single unit of work: you won't get any "interleaving" with other processes.
However (or to put your mind at rest!), consider the OUTPUT clause
DECLARE @KeyStore TABLE (ID int NOT NULL)
INSERT INTO Foo (Data)
OUTPUT INSERTED.ID INTO @KeyStore (ID) --this line
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
If you want the Identity values for multiple rows use OUTPUT:
DECLARE @NewIDs table (PKColumn int)
INSERT INTO Foo (Data)
OUTPUT INSERTED.ID
INTO @NewIDs (PKColumn)
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
You now have the entire set of values in the @NewIDs table. You can add any columns from the Foo table into the @NewIDs table and insert those columns as well.
It is not good practice to attach any sort of meaning whatsoever to identity values. You should assume that they are nothing more than integers guaranteed to be unique within the scope of your table.
Try adding the following:
option(maxdop 1)
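That is, appended as a query hint to the INSERT ... SELECT from the question, for example (sketch using the question's table names):
INSERT INTO Foo (Data)
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
OPTION (MAXDOP 1)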
