I run a simple insert/update/delete 1m-row script to crudely check the health of our SQL Server installations. It's 10 times slower on Azure SQL (S6) than on our in-house test server. Has anyone experienced similar problems? Is there a fundamental difference in the way Azure SQL behaves which invalidates the test?
Test Results
Our Internal Server
32 GB RAM, Intel Xeon 3.3 GHz
(1000000 rows affected)
Insert Duration = 124 Seconds
Update Duration = 3 Seconds
Delete Duration = 3 Seconds
Azure SQL database 400 DTUs (S6)
(1000000 rows affected)
Insert Duration = 1267 Seconds
Update Duration = 36 Seconds
Delete Duration = 71 Seconds
SQL Server Script
IF (SELECT COUNT(name) FROM sysobjects WHERE name = 'PerfTest') >0
BEGIN
DROP TABLE PerfTest
CREATE TABLE [dbo].[PerfTest](
[PerfID] [int] NOT NULL,
[PerfTX] [varchar](20) COLLATE Latin1_General_CI_AI NULL,
[PerfDT] [datetime] NULL,
[PerfNm] [int] NULL,
CONSTRAINT [PK_PerfTest] PRIMARY KEY CLUSTERED
([PerfID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
END
ELSE
BEGIN
CREATE TABLE [dbo].[PerfTest](
[PerfID] [int] NOT NULL,
[PerfTX] [varchar](20) COLLATE Latin1_General_CI_AI NULL,
[PerfDT] [datetime] NULL,
[PerfNm] [int] NULL,
CONSTRAINT [PK_PerfTest] PRIMARY KEY CLUSTERED
([PerfID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
END
DECLARE
@InsertStart DATETIME,
@InsertEnd DATETIME,
@DeleteStart DATETIME,
@DeleteEnd DATETIME,
@UpdateStart DATETIME,
@UpdateEnd DATETIME,
@PID AS INT,
@PTX AS VARCHAR(20),
@PDT AS DATETIME,
@PNM AS INT
BEGIN
PRINT 'Timings will be at the bottom of this result set'
SET @PID = 0
SET @PNM = 0
SET @PTX = 'ABCDEFGHIJABCDEFGHIJ'
SET @InsertStart = GETDATE()
--Insert Test
WHILE (@PID < 1000000)
BEGIN
SET @PID = @PID + 1
SET @PNM = @PNM + 1
SET @PDT = GETDATE()
INSERT INTO PerfTest VALUES(@PID, @PTX, @PDT, @PNM)
END
SET @InsertEnd = GETDATE()
--Begin Update Test
SET @UpdateStart = GETDATE()
UPDATE PerfTest SET PerfNm = PerfNm + 1
SET @UpdateEnd = GETDATE()
--Begin Delete Test
SET @DeleteStart = GETDATE()
DELETE FROM PerfTest
SET @DeleteEnd = GETDATE()
PRINT 'Insert Duration = ' + CAST(DATEDIFF(SS, @InsertStart, @InsertEnd) AS CHAR(5)) + ' Seconds'
PRINT 'Update Duration = ' + CAST(DATEDIFF(SS, @UpdateStart, @UpdateEnd) AS CHAR(5)) + ' Seconds'
PRINT 'Delete Duration = ' + CAST(DATEDIFF(SS, @DeleteStart, @DeleteEnd) AS CHAR(5)) + ' Seconds'
DROP TABLE PerfTest
END
Thanks, people. I'd really appreciate you sharing any experience you have in this area.
Related
I now have a simple example of this issue following on from: Snapshot isolation transaction aborted due to update conflict in SQL Server
This is the script to create the database tables:
CREATE TABLE [dbo].[tblPPObjectChildObjectList](
[SortIndex] [int] NOT NULL,
[UpdateTime] [datetime] NULL,
[InsertionID] [bigint] NOT NULL,
[ChildInsertionID] [bigint] NOT NULL,
[SortText] [nvarchar](260) NULL,
[UpdateID] [bigint] NULL,
[RemovalThreshold] [bigint] NULL,
CONSTRAINT [PK_tblPPObjectChildObjectList] PRIMARY KEY CLUSTERED
(
[InsertionID] ASC,
[ChildInsertionID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 85, OPTIMIZE_FOR_SEQUENTIAL_KEY = OFF) ON [PRIMARY]
) ON [PRIMARY]
GO
CREATE NONCLUSTERED INDEX [IX_tblPPObjectChildObjectList_ChildInsertionID_INC_InsertionID_UpdateID_SortText_SortIndex_UpdateTime_RemovalThreshold] ON [dbo].[tblPPObjectChildObjectList]
(
[ChildInsertionID] ASC
)
INCLUDE([InsertionID],[UpdateID],[SortText],[SortIndex],[UpdateTime],[RemovalThreshold]) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 85, OPTIMIZE_FOR_SEQUENTIAL_KEY = OFF) ON [PRIMARY]
GO
CREATE NONCLUSTERED INDEX [IX_tblPPObjectChildObjectList_InsertionID_UpdateID_INC_SortText_SortIndex_UpdateTime_RemovalThreshold] ON [dbo].[tblPPObjectChildObjectList]
(
[InsertionID] ASC,
[UpdateID] ASC
)
INCLUDE([SortText],[SortIndex],[UpdateTime],[RemovalThreshold]) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 85, OPTIMIZE_FOR_SEQUENTIAL_KEY = OFF) ON [PRIMARY]
GO
CREATE TYPE [dbo].[udtPPChildObjectList] AS TABLE(
[InsertionId] [bigint] NOT NULL,
[ChildInsertionId] [bigint] NOT NULL,
[SortIndex] [int] NULL,
[UpdateId] [bigint] NULL,
[SortText] [nvarchar](260) NULL,
[RemovalThreshold] [bigint] NULL,
PRIMARY KEY CLUSTERED
(
[ChildInsertionId] ASC
)WITH (IGNORE_DUP_KEY = OFF)
)
GO
CREATE TYPE [dbo].[udtPPInsertionIDList] AS TABLE(
[InsertionID] [bigint] NOT NULL,
PRIMARY KEY CLUSTERED
(
[InsertionID] ASC
)WITH (IGNORE_DUP_KEY = OFF)
)
GO
Snapshot isolation must be enabled on the database, and read committed snapshot must be on as well.
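For reference, a minimal sketch of the statements that turn those options on, assuming a database named MyDatabase (substitute your own database name):
ALTER DATABASE [MyDatabase] SET ALLOW_SNAPSHOT_ISOLATION ON;
-- switching READ_COMMITTED_SNAPSHOT needs exclusive access; WITH ROLLBACK IMMEDIATE kicks out other sessions
ALTER DATABASE [MyDatabase] SET READ_COMMITTED_SNAPSHOT ON WITH ROLLBACK IMMEDIATE;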
This script should be used to populate the table:
declare @i int
set @i = 1
while (@i < 200)
begin
insert into [dbo].[tblPPObjectChildObjectList]
select -1, GetUTCDate(), @i, @i * 1000, null, 1, null
set @i = @i + 1
end
GO
There are then two scripts which must be run at the same time. This is the update script:
SET TRANSACTION ISOLATION LEVEL READ COMMITTED
GO
declare @insertionid bigint
set @insertionid = 1
while (1 = 1)
begin
BEGIN TRY
BEGIN TRANSACTION;
WAITFOR DELAY '00:00:01';
declare @updatetime datetime
set @updatetime = GetUTCDate()
declare @values dbo.udtPPChildObjectList
delete from @values
insert into @values select 1, 1000, -1, 1, null, null
insert into @values select 2, 2000, -1, 1, null, null
insert into @values select 3, 3000, -1, 1, null, null
insert into @values select 4, 4000, -1, 1, null, null
insert into @values select 5, 5000, -1, 1, null, null
insert into @values select 6, 6000, -1, 1, null, null
insert into @values select 7, 7000, -1, 1, null, null
insert into @values select 8, 8000, -1, 1, null, null
insert into @values select 9, 9000, -1, 1, null, null
insert into @values select 10, 10000, -1, 1, null, null
update t
set t.UpdateTime = @updatetime
from tblPPObjectChildObjectList as t
join @values as s
on s.ChildInsertionId = t.ChildInsertionID
select t.ChildInsertionID
from tblPPObjectChildObjectList as t with (updlock, rowlock)
left join @values as s
on s.InsertionId = t.InsertionID and s.ChildInsertionId = t.ChildInsertionID
where (t.InsertionID in (select InsertionId from @values)) and (s.ChildInsertionId is null)
COMMIT TRANSACTION;
END TRY
BEGIN CATCH
ROLLBACK TRANSACTION;
print 'ERROR :' + ERROR_MESSAGE()
break;
END CATCH
end
GO
and this is the delete script:
SET TRANSACTION ISOLATION LEVEL SNAPSHOT
GO
while (1 = 1)
begin
BEGIN TRY
WAITFOR DELAY '00:00:01';
declare @insertionids dbo.udtPPInsertionIDList
declare @i int
set @i = 1
while (@i < 150)
begin
insert into @insertionids
select 90000000 + @i
set @i = @i + 1
end
set deadlock_priority low
set nocount on
-- Create the required temporary tables
declare @LocalInsertionIDs table (InsertionID bigint, PRIMARY KEY (InsertionID))
delete from @LocalInsertionIDs
insert into @LocalInsertionIDs
select InsertionID from @insertionids
if ((select count(*) from @LocalInsertionIDs) > 0)
begin
declare @c4 int
select @c4 = count(*)
from tblPPObjectChildObjectList as pocol
join @LocalInsertionIDs as ii
on pocol.InsertionID = ii.InsertionID
delete from pocol with (rowlock, updlock)
from tblPPObjectChildObjectList as pocol with (rowlock, updlock)
join @LocalInsertionIDs as ii
on pocol.InsertionID = ii.InsertionID
declare @c5 int
select @c5 = count(*)
from tblPPObjectChildObjectList as pocol
join @LocalInsertionIDs as ii
on pocol.ChildInsertionID = ii.InsertionID
delete from pocol with (rowlock, updlock)
from tblPPObjectChildObjectList as pocol with (rowlock, updlock)
join @LocalInsertionIDs as ii
on pocol.ChildInsertionID = ii.InsertionID
end
delete from @insertionids
END TRY
BEGIN CATCH
print 'ERROR :' + ERROR_MESSAGE()
break;
END CATCH
end
GO
After 10-15 minutes the delete script will fail with the update error even though the rows being removed are not being updated (in fact they do not even exist).
Can anyone see why this exception is being raised?
After 10-15 minutes the delete script will fail with the update error
even though the rows being removed are not being inserted or updated.
The query below, which gets the rows to be deleted, performs a full scan of the tblPPObjectBlobProperty table because no index exists on UpdateTime. The UPDLOCK acquisition will then fail with an update conflict when rows outside the range to be deleted are touched and one of those rows has been modified by another transaction.
insert into @InsertionIDs
select distinct InsertionID, UpdateTime from tblPPObjectBlobProperty as poco with (rowlock, updlock)
where UpdateTime < @thresholddatetime
Add an index on the UpdateTime column so that only the rows to be deleted are touched. This should avoid the update conflict error.
CREATE INDEX idx_tblPPObjectBlobProperty_UpdateTime ON dbo.tblPPObjectBlobProperty(UpdateTime);
On a side note, I suggest you use THROW to facilitate troubleshooting. The error message will include the line number of the problem statement in the script. Also, add SET XACT_ABORT ON; to scripts/procs with explicit transactions to ensure the transaction is rolled back immediately after an error, client timeout, or query cancel. Below is the standard catch block I use.
BEGIN CATCH
IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION;
THROW;
END CATCH;
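Putting those pieces together, here is a minimal sketch of a procedure skeleton that combines SET XACT_ABORT ON with that catch block (the procedure name and body are placeholders, not part of the original scripts):
CREATE PROCEDURE dbo.usp_DoWork
AS
BEGIN
SET NOCOUNT ON;
SET XACT_ABORT ON; -- ensures the transaction is rolled back on error, client timeout, or query cancel
BEGIN TRY
BEGIN TRANSACTION;
-- ... data modifications go here ...
COMMIT TRANSACTION;
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION;
THROW; -- re-raises the original error, including the line number
END CATCH;
END;
GO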
I have a system-versioned table with a related history table, as follows:
CREATE TABLE [dbo].[ExpenseCenter_Archive](
[ExpenseCenterId] [tinyint] NOT NULL,
[Name] [nvarchar](200) NOT NULL,
[LineCode] [smallint] NOT NULL,
[SysStartTime] [datetime2](2) NOT NULL,
[SysEndTime] [datetime2](2) NOT NULL
) ON [FG_HISTORY]
GO
-------
CREATE TABLE [dbo].[ExpenseCenter](
[ExpenseCenterId] [tinyint] NOT NULL,
[Name] [nvarchar](200) NOT NULL,
[LineCode] [smallint] NOT NULL,
[SysStartTime] [datetime2](2) GENERATED ALWAYS AS ROW START NOT NULL,
[SysEndTime] [datetime2](2) GENERATED ALWAYS AS ROW END NOT NULL,
CONSTRAINT [PK_ExpenseCenter] PRIMARY KEY CLUSTERED
(
[ExpenseCenterId] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 90) ON [FG_DATA],
CONSTRAINT [UK_ExpenseCenterName] UNIQUE NONCLUSTERED
(
[Name] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 90) ON [FG_INDEX],
PERIOD FOR SYSTEM_TIME ([SysStartTime], [SysEndTime])
) ON [FG_DATA]
WITH
(
SYSTEM_VERSIONING = ON (HISTORY_TABLE = [dbo].[ExpenseCenter_Archive] , DATA_CONSISTENCY_CHECK = ON )
)
GO
Now I want to alter the data type of 'LineCode' in the system-versioned table and its history table. After making the change, I enable versioning again as follows:
--- Before edit column
ALTER TABLE [dbo].[ExpenseCenter] SET (SYSTEM_VERSIONING = OFF);
-- ## Edit column in ssms ##
--- After edit column
ALTER TABLE [dbo].[ExpenseCenter]
SET
(
SYSTEM_VERSIONING = ON (HISTORY_TABLE = [dbo].[ExpenseCenter_Archive])
);
But I get the following error:
Cannot set SYSTEM_VERSIONING to ON when SYSTEM_TIME period is not defined.
How do I solve this issue?
From your question, you are saying that ExpenseCenter_Archive is the history table for ExpenseCenter, but the error message says that [dbo].[ExpenseCenter] is not a system-versioned table: its SYSTEM_TIME period is not defined. If you want a system-versioned table, add SYSTEM_TIME to it.
So here are the steps I would follow to make a table a system-versioned (temporal) table.
If it is for a new table:
CREATE TABLE Department
(
DeptID int NOT NULL PRIMARY KEY CLUSTERED
, DeptName varchar(50) NOT NULL
, ManagerID INT NULL
, ParentDeptID int NULL
, SysStartTime datetime2 GENERATED ALWAYS AS ROW START NOT NULL
, SysEndTime datetime2 GENERATED ALWAYS AS ROW END NOT NULL
, PERIOD FOR SYSTEM_TIME (SysStartTime,SysEndTime)
)
WITH (SYSTEM_VERSIONING = ON)
;
If I need to alter a data type (or otherwise change the schema) for this newly created table, MSDN recommends doing it in a transaction:
BEGIN TRAN
ALTER TABLE [dbo].[CompanyLocation] SET (SYSTEM_VERSIONING = OFF);
ALTER TABLE [CompanyLocation] ADD Cntr INT IDENTITY (1,1);
ALTER TABLE [dbo].[CompanyLocation]
SET
(
SYSTEM_VERSIONING = ON (HISTORY_TABLE = [dbo].[CompanyLocationHistory])
);
COMMIT ;
If I want to make an existing table temporal, then I would do it like below:
ALTER TABLE dbo.Product
ADD StartTime DATETIME2 GENERATED ALWAYS AS ROW START
HIDDEN DEFAULT GETUTCDATE(),
EndTime DATETIME2 GENERATED ALWAYS AS ROW END
HIDDEN DEFAULT
CONVERT(DATETIME2, '9999-12-31 23:59:59.9999999'),
PERIOD FOR SYSTEM_TIME (StartTime, EndTime)
Now, finally, set SYSTEM_VERSIONING to ON:
ALTER TABLE dbo.Product
SET (SYSTEM_VERSIONING = ON (HISTORY_TABLE=dbo.ProductHistory))
GO
References:
http://sqlhints.com/tag/modify-existing-table-as-system-versioned-temporal-table/
https://msdn.microsoft.com/en-us/library/mt590957.aspx
To alter a column on a system-versioned table you do not need to set SYSTEM_VERSIONING = OFF; you can use ALTER TABLE ... ALTER COLUMN directly.
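A minimal sketch of that approach for the table above, assuming the new data type is int (substitute whichever type you actually need); SQL Server propagates the column change to the history table automatically:
ALTER TABLE [dbo].[ExpenseCenter]
ALTER COLUMN [LineCode] int NOT NULL;
GO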
I need to create a method of generating a unique order number. Each order number must always be greater than the last; however, they should not always be consecutive. The solution must work in a web farm environment.
I currently have a stored procedure which is responsible for getting a new order number, and which has to be seeded so that the order numbers are not consecutive. The application is now moving from a single server to a web farm, and therefore controlling access to the stored procedure via a lock in C# is no longer viable. I have updated the stored procedure as below; however, I am concerned that I am going to introduce blocks/locks/deadlocks when concurrent calls occur.
The table and index structures are as follows
MyAppSetting Table
CREATE TABLE [dbo].[MyAppSetting](
[SettingName] [nvarchar](255) NOT NULL,
[SettingValue] [nvarchar](max) NOT NULL,
CONSTRAINT [PK_MyAppSetting] PRIMARY KEY CLUSTERED
(
[SettingName] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY]
My Order table
CREATE TABLE [dbo].[MyOrder](
[id] [int] IDENTITY(1,1) NOT NULL,
[OrderNumber] [nvarchar](50) NOT NULL CONSTRAINT [DF_MyOrder_OrderNumber] DEFAULT (N''),
... rest of the table
CONSTRAINT [PK_MyOrder] PRIMARY KEY NONCLUSTERED
(
[id] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY]
The SQL Transaction
Set Transaction Isolation Level Serializable;
Begin Transaction
--Gen random number
SELECT @Random = ROUND(((@HighSeed - @LowSeed -1) * RAND() + @LowSeed), 0)
--Get Seed
select @Seed = [SettingValue] FROM [MyAppSetting] where [SettingName] = 'OrderNumberSeed'
--Removed the concurrency check; it is not required as the order number should not exceed the seed number
--select @MaxOrderNumber = Max(OrderNumber) FROM MyOrder
--if @MaxOrderNumber >= @Seed Begin
-- Set @Seed = @MaxOrderNumber
--end
-- New Seed
Set @OrderNumber = @Seed + @Random
Update [MyAppSetting] Set [SettingValue] = @OrderNumber where [SettingName] = 'OrderNumberSeed'
select @OrderNumber
Commit
With the revised SQL you provided, you only select from and update one table. You can do this in a single query, which should avoid the risk of deadlocks and removes the need for an explicit transaction.
Setup:
CREATE TABLE OrderNumber ( NextOrderNumber int)
INSERT OrderNumber(NextOrderNumber) values (123)
Get Next Order Number
DECLARE @MinIncrement int = 5
DECLARE @MaxIncrement int = 50
DECLARE @Random int = ROUND(((@MaxIncrement - @MinIncrement -1) * RAND() + @MinIncrement), 0)
DECLARE @OrderNumber int
UPDATE OrderNumber
SET @OrderNumber = NextOrderNumber, NextOrderNumber = NextOrderNumber + @Random
SELECT @OrderNumber
I changed LowSeed and HighSeed to MinIncrement and MaxIncrement as I found the term Seed here to be confusing. I would use a table dedicated to tracking the order number to avoid locking anything else on the MyAppSetting table.
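If you want to keep this behind a stored procedure, as in the original setup, a minimal sketch of a wrapper might look like the following (the procedure name and increment bounds are placeholders):
CREATE PROCEDURE dbo.usp_GetNextOrderNumber
@OrderNumber int OUTPUT
AS
BEGIN
SET NOCOUNT ON;
DECLARE @MinIncrement int = 5, @MaxIncrement int = 50
DECLARE @Random int = ROUND(((@MaxIncrement - @MinIncrement - 1) * RAND() + @MinIncrement), 0)
-- single atomic statement: reads the current value and advances it in one step
UPDATE OrderNumber
SET @OrderNumber = NextOrderNumber, NextOrderNumber = NextOrderNumber + @Random
END
GO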
I would also challenge the requirement of having an order number that always increases but is not sequential; without it, a GUID would be easier.
An alternative to consider would be to derive the order number from the time somehow, with the last digit identifying the server.
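For example, a rough sketch of the time-derived idea (purely illustrative; the format string and the fixed server digit are assumptions you would need to adapt):
DECLARE @ServerDigit char(1) = '1' -- assumed: a distinct fixed digit configured per web-farm node
DECLARE @OrderNumber nvarchar(50) = FORMAT(SYSUTCDATETIME(), 'yyyyMMddHHmmssfff') + @ServerDigit
SELECT @OrderNumber
This keeps order numbers increasing on each node without any shared state, at the cost of relying on the server clock.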
I have a question with regards to performance. I currently have a table that is having trouble with query performance whenever the number of rows is already in the millions.
This is the table:
CREATE TABLE [dbo].[HistorySampleValues]
(
[HistoryParameterID] [int] NOT NULL,
[SourceTimeStamp] [datetime2](7) NOT NULL,
[ArchiveTimestamp] [datetime2](7) NOT NULL CONSTRAINT [DF__HistorySa__Archi__2A164134] DEFAULT (getutcdate()),
[ValueStatus] [int] NOT NULL,
[ArchiveStatus] [int] NOT NULL,
[IntegerValue] [bigint] SPARSE NULL,
[DoubleValue] [float] SPARSE NULL,
[StringValue] [varchar](100) SPARSE NULL,
[EnumNamedSetName] [varchar](100) SPARSE NULL,
[EnumNumericValue] [int] SPARSE NULL,
[EnumTextualValue] [varchar](256) SPARSE NULL
) ON [PRIMARY]
CREATE CLUSTERED INDEX [Source_HistParameterID_Index] ON [dbo].[HistorySampleValues]
(
[HistoryParameterID] ASC,
[SourceTimeStamp] ASC
) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON)
GO
It's fairly flat with a clustered index on HistoryParameterID and SourceTimeStamp.
This is the stored procedure that I'm using
SET NOCOUNT ON;
DECLARE @SqlCommand NVARCHAR(MAX)
SET @SqlCommand = 'SELECT HistoryParameterID,
SourceTimestamp, ArchiveTimestamp,ValueStatus,ArchiveStatus,
IntegerValue,DoubleValue,StringValue,EnumNumericValue,
EnumTextualValue,EnumNamedSetName
FROM [HistorySampleValues] WITH(NOLOCK)
WHERE ([HistoryParameterID] =' + @ParamIds + '
AND
[SourceTimeStamp] >= ''' + CONVERT(VARCHAR(30),@StartTime, 25) + '''
AND
[SourceTimeStamp] <= ''' + CONVERT(VARCHAR(30),@EndTime, 25) + ''')
AND ValueStatus = ' + @ValueStatus
EXECUTE( @SqlCommand )
As you can see, HistoryParameterID and SourceTimestamp are being used as the parameters for the first query. Retrieving 8 hours' worth of records, which is ~28k rows, returns with erratic performance: anywhere from 700 ms to 1.8 seconds.
Will the design scale when it reaches 77 billion records, or is there a strategy that should be used? The version of SQL Server is Standard Edition, so there is no partitioning or columnstore to be used. Or have I reached the maximum performance of SQL Server Standard Edition?
This is the updated stored procedure:
@ParamIds int,
@StartTime datetime,
@EndTime datetime,
@ValueStatus int
AS
BEGIN
SET NOCOUNT ON;
SELECT HistoryParameterID,
SourceTimestamp, ArchiveTimestamp,ValueStatus,ArchiveStatus,
IntegerValue,DoubleValue,StringValue,EnumNumericValue,
EnumTextualValue,EnumNamedSetName
FROM [HistorySampleValues] WITH(NOLOCK)
WHERE
HistoryParameterID = @ParamIds
AND (SourceTimeStamp >= @StartTime AND SourceTimeStamp <= @EndTime)
AND (@ValueStatus = -1 OR ValueStatus = @ValueStatus)
I got a 1.396-second client processing time retrieving 41,213 rows from a table of ~849,600,000 rows.
Is there a way to improve this?
Every time you execute a new SQL command, it has to be compiled by SQL Server. If you re-use the command, you save on compilation time. You should execute the command directly in the stored procedure, something like this, which allows plan reuse and should give you more consistent results.
SELECT ...
WHERE [HistoryParameterID] = @ParamIds
AND [SourceTimeStamp] >= @StartTime
AND [SourceTimeStamp] <= @EndTime
AND ValueStatus = @ValueStatus
This will also give you an opportunity to monitor the performance of the command.
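If you do need to keep the SQL dynamic, sp_executesql with parameters is another way to get plan reuse; a minimal sketch (one option among several, reusing the same parameter names as the procedure above):
DECLARE @SqlCommand nvarchar(max) = N'
SELECT HistoryParameterID, SourceTimestamp, ArchiveTimestamp, ValueStatus, ArchiveStatus,
IntegerValue, DoubleValue, StringValue, EnumNumericValue, EnumTextualValue, EnumNamedSetName
FROM dbo.HistorySampleValues
WHERE HistoryParameterID = @ParamIds
AND SourceTimeStamp >= @StartTime
AND SourceTimeStamp <= @EndTime
AND ValueStatus = @ValueStatus;'
EXEC sys.sp_executesql
@SqlCommand,
N'@ParamIds int, @StartTime datetime, @EndTime datetime, @ValueStatus int',
@ParamIds = @ParamIds, @StartTime = @StartTime, @EndTime = @EndTime, @ValueStatus = @ValueStatus;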
I am aware that some causes of index fragmentation are:
Non-sequential inserts: when a row has to go onto a page that is already full, SQL Server splits the page and moves roughly 50% of the data from the old page to a newly allocated page, leaving each page about half full.
Updates that replace an existing value with a larger one that no longer fits on the same page.
I have heard that even if you roll back the transaction, the fragmentation remains, but I could not find documentation for that.
Does anybody have documentation for that, or a script to prove it?
Today I did some tests, and the results were not exactly what I would expect.
The environment:
Microsoft SQL Server 2008 (SP2) - 10.0.4000.0 (X64) Sep 16 2010 19:43:16 Copyright (c) 1988-2008 Microsoft Corporation Enterprise Edition (64-bit) on Windows NT 6.0 (Build 6002: Service Pack 2)
First of all, I looked for a table that already had fragmentation.
Surprisingly, inside my DBA database I found a table called tableSizeBenchmark.
USE [DBA]
GO
CREATE TABLE [dbo].[tableSizeBenchmark](
[lngID] [bigint] IDENTITY(1,1) NOT FOR REPLICATION NOT NULL,
[dbName] [varchar](100) NOT NULL,
[tableName] [varchar](100) NOT NULL,
[creationDate] [smalldatetime] NOT NULL,
[numberOfRows] [bigint] NULL,
[spaceUsedMb] [numeric](18, 0) NULL,
CONSTRAINT [PK_tableSizeBenchmark] PRIMARY KEY CLUSTERED
(
[lngID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF,
ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON)
ON [PRIMARY]
) ON [PRIMARY]
GO
USE [DBA]
GO
CREATE UNIQUE NONCLUSTERED INDEX [UIXtableSizeBenchmark] ON [dbo].[tableSizeBenchmark]
(
[dbName] ASC,
[tableName] ASC,
[creationDate] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF,
IGNORE_DUP_KEY = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON,
ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
GO
This is the level of fragmentation BEFORE doing any test:
You need to create these two procedures in order to carry out the same test.
Basically I used a random string generator and a random number generator, because I wanted to insert 10,000 records, see how they make the fragmentation worse, and then ROLLBACK the transaction to see the truth: whether the fragmentation remains or goes away.
--DROP PROCEDURE GetRandomString
--GO
--DROP PROCEDURE GetRandomNumber
--GO
create procedure GetRandomString (@STR VARCHAR(100) OUTPUT)
as
begin
-- generates a random string
-- marcelo miorelli
-- 01-oct-2014
-- one of the other features that makes this more flexible:
-- By repeating blocks of characters in @CharPool,
-- you can increase the weighting on certain characters so that they are more likely to be chosen.
DECLARE @z INT
, @i INT
, @MIN_LENGTH INT
, @MAX_LENGTH INT
DECLARE @CharPool VARCHAR(255)
DECLARE @RandomString VARCHAR(255)
DECLARE @PoolLength INT
SELECT @MIN_LENGTH = 20
SELECT @MAX_LENGTH = 100
--SET @z = RAND() * (@max_length - @min_length + 1) + @min_length
SET @Z = 50
-- define allowable characters explicitly - easy to read this way and easy to
-- omit easily confused chars like l (ell) and 1 (one) or 0 (zero) and O (oh)
SET @CharPool =
'abcdefghijkmnopqrstuvwxyzABCDEFGHIJKLMNPQRSTUVWXYZ23456789.,-_!$##%^&*'
SET @CharPool =
'ABCDEFGHIJKLMNPQRSTUVWXYZ'
SET @PoolLength = Len(@CharPool)
SET @i = 0
SET @RandomString = ''
WHILE (@i < @z) BEGIN
SELECT @RandomString = @RandomString +
SUBSTRING(@Charpool, CONVERT(int, RAND() * @PoolLength), 1)
SELECT @i = @i + 1
END
SELECT @STR = @RandomString
end
GO
create procedure GetRandomNumber (@number int OUTPUT)
as
begin
-- generate random numbers
-- marcelo miorelli
-- 01-oct-2014
DECLARE @maxval INT, @minval INT
select @maxval=10000,@minval=500
SELECT @Number = CAST(((@maxval + 1) - @minval) *
RAND(CHECKSUM(NEWID())) + @minval AS INT)
end
go
After you have created the procedures above, see below the code that I have used to run this test:
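First, the insert step, which runs inside a transaction that is deliberately left open. This is a minimal sketch of it, assuming 10,000 rows inserted into tableSizeBenchmark using the two procedures above (the column values are placeholders; the exact loop may have differed):
BEGIN TRANSACTION -- left open on purpose so fragmentation can be checked before the rollback
DECLARE @i int = 0, @str varchar(100), @num int
WHILE (@i < 10000)
BEGIN
EXEC GetRandomString @STR = @str OUTPUT
EXEC GetRandomNumber @number = @num OUTPUT
INSERT INTO dbo.tableSizeBenchmark (dbName, tableName, creationDate, numberOfRows, spaceUsedMb)
VALUES (@str, @str, GETDATE(), @num, @num)
SET @i = @i + 1
END
-- do NOT commit or roll back yet
With the transaction still open, the query below shows the fragmentation at this point: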
SELECT object_id AS ObjectID,
object_NAME (Object_id) as Table_NAME,
index_id AS IndexID,
avg_fragmentation_in_percent AS PercentFragment,
fragment_count AS TotalFrags,
avg_fragment_size_in_pages AS PagesPerFrag,
page_count AS NumPages
FROM sys.dm_db_index_physical_stats(DB_ID('dba'),
NULL, NULL, NULL , 'DETAILED')
WHERE OBJECT_ID = OBJECT_ID('tableSizeBenchmark')
and avg_fragmentation_in_percent > 0
So, this is the result after running the above script, but BEFORE ROLLBACK OR COMMIT, while the transaction is still open:
Please note that the fragmentation has INCREASED.
Would this fragmentation REMAIN or DISAPPEAR after we roll back this transaction?
Let me also post here the script that I use to see the fragmentation level:
SELECT object_id AS ObjectID,
object_NAME (Object_id) as Table_NAME,
index_id AS IndexID,
avg_fragmentation_in_percent AS PercentFragment,
fragment_count AS TotalFrags,
avg_fragment_size_in_pages AS PagesPerFrag,
page_count AS NumPages
FROM sys.dm_db_index_physical_stats(DB_ID('dba'),
NULL, NULL, NULL , 'DETAILED')
WHERE OBJECT_ID = OBJECT_ID('tableSizeBenchmark')
and avg_fragmentation_in_percent > 0
And these are the results of the experiment after rolling back the transaction:
As you can see, the fragmentation levels went back to the original situation from before the transaction.
Hope this helps
Marcelo