I'm getting an error message but it executes fine? - sql-server

IF EXISTS (SELECT name
           FROM sys.tables
           WHERE name = 'Nums')
BEGIN
    DROP TABLE dbo.Nums;
END
CREATE TABLE dbo.Nums
(
    number INT NOT NULL,
    CONSTRAINT PK_Nums PRIMARY KEY CLUSTERED (number ASC),
    code CHAR(9),
    date DATETIME
) ON [PRIMARY]
INSERT INTO Nums (number, code, date)
VALUES (0, 485658235, '2000/01/01')
DECLARE @number INT, @code CHAR(9), @date DATETIME
SET @number = (SELECT MAX(Number) FROM Nums)
SET @date = (SELECT Date FROM Nums)
SET @code = (SELECT code FROM Nums)
WHILE @number < 100000
BEGIN
    INSERT INTO Nums --(number, code, date)
    VALUES (@number, @code, @date)
    SET @number = @number + 1
    SET @date = DATEADD(DAY, 5, @date)
    SET @code = LEFT(CAST(CAST(CEILING(RAND() * 10000000000) AS BIGINT) AS VARCHAR), 9)
END
SELECT * FROM Nums
This is the error I get:
Msg 2627, Level 14, State 1, Line 41
Violation of PRIMARY KEY constraint 'PK_Nums'. Cannot insert duplicate key in object 'dbo.Nums'. The duplicate key value is (0)

You need to add 1 to the number before the loop starts.
The error "Violation of PRIMARY KEY constraint" is not normally batch-aborting, so execution continues with the next statement.
If you want to stop that, either force the batch to abort and roll back with SET XACT_ABORT ON (probably a good idea), or use TRY/CATCH; a minimal sketch follows.
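A minimal TRY/CATCH sketch (reusing the table and values from the question) that surfaces the error instead of letting the batch continue silently:

BEGIN TRY
    -- This duplicates the existing key value 0 on purpose
    INSERT INTO dbo.Nums (number, code, date)
    VALUES (0, '485658235', '2000-01-01');
END TRY
BEGIN CATCH
    -- Control lands here on the PK violation; log or re-throw as needed
    PRINT ERROR_MESSAGE();
END CATCH;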
I assume you also realize your procedure can be done in one statement, by joining a tally table or function (for example the ones by Itzik Ben-Gan), something like this:
INSERT #Nums (number, code, date)
SELECT
    t.Num,
    LEFT(CAST(CAST(CEILING(RAND() * 10000000000) AS BIGINT) AS VARCHAR), 9),
    '2000/01/01'
FROM fnTallyTable (0, 99999) t
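One caveat, not part of the original answer: RAND() without a seed is evaluated once per query, so the set-based version above would give every row the same code. A common workaround is to seed RAND per row, for example with CHECKSUM(NEWID()); a sketch against the same hypothetical fnTallyTable:

INSERT #Nums (number, code, date)
SELECT
    t.Num,
    -- RAND(CHECKSUM(NEWID())) is re-evaluated for each row
    LEFT(CAST(CAST(CEILING(RAND(CHECKSUM(NEWID())) * 10000000000) AS BIGINT) AS VARCHAR), 9),
    '2000/01/01'
FROM fnTallyTable (0, 99999) t;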

Related

Stored procedure that needs to return unique values in a custom format but seems to return duplicates

I have a stored procedure in Microsoft SQL Server that should return unique values based on a custom format, SSSSTT99999, where SSSS and TT come from parameters and 99999 is a unique sequence per combination of SSSS and TT. I need to store the last sequence for each SSSS and TT in a table so I can retrieve the next sequence on the next call. The problem with this code is that in a multi-user environment, at least two simultaneous calls may generate the same value. How can I make sure that each call to this stored procedure gets a unique value?
CREATE PROCEDURE GenRef
    @TT nvarchar(30),
    @SSSS nvarchar(50)
AS
declare @curseq as integer
set @curseq = (select sequence from DocSequence where
               docsequence.TT = @TT and
               DocSequence.SSSS = @SSSS)
if @curseq is null
begin
    set @curseq = 1
    insert docsequence (id, TT, SSSS, sequence) values
        (newid(), @TT, @SSSS, 1)
end
else
begin
    update DocSequence set Sequence = @curseq + 1 where
        docsequence.TT = @TT and
        DocSequence.SSSS = @SSSS
end
declare @curtr varchar(30)
set @curtr = RIGHT('0000' + @SSSS, 4)
    + @TT
    + RIGHT('00000' + @curseq, 5)
select @curtr
GO
Updated code with transactions:
ALTER PROCEDURE [dbo].[GenTRNum]
    @TRType nvarchar(50),
    @BranchCode nvarchar(50)
AS
declare @curseq as integer
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
begin transaction
if not exists (select top 1 sequence from DocSequence where
               docsequence.DocType = @TRType and
               DocSequence.BranchCode = @BranchCode)
begin
    insert docsequence (id, doctype, sequence, branchcode) values
        (newid(), @TRType, 1, @BranchCode)
end
else
begin
    update DocSequence set Sequence = sequence + 1 where
        docsequence.DocType = @TRType and
        DocSequence.BranchCode = @BranchCode
end
commit
set @curseq = (select top 1 sequence from DocSequence where
               docsequence.DocType = @TRType and
               DocSequence.BranchCode = @BranchCode)
declare @curtr varchar(30)
set @curtr = RIGHT('0000' + @BranchCode, 4)
    + @TRType
    + RIGHT('00000' + convert(varchar(5), @curseq), 5)
select @curtr
You can handle this at the application level with threading, assuming you have a single application server.
Suppose you have a method GetUniqueValue that executes this stored procedure and uses a database transaction at READ COMMITTED.
If two users call GetUniqueValue at exactly the same moment, say 2019-08-30 10:59:38.173, each call runs on its own thread and tries to open a transaction against the procedure; only one will get the transaction and the other will wait until it finishes.
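As an aside (not part of either answer), the same serialization can also be done inside SQL Server itself with sp_getapplock, which makes concurrent callers queue on an arbitrary lock name. A rough sketch of the pattern, with the lock name chosen here only as an example:

BEGIN TRANSACTION;
-- All callers queue on the same named application lock
EXEC sp_getapplock
     @Resource  = 'GenTRNum',
     @LockMode  = 'Exclusive',
     @LockOwner = 'Transaction';
-- read and increment DocSequence here; only one session at a time gets past the lock
COMMIT TRANSACTION;  -- committing releases the application lock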
Here is how I would solve this task:
Table structure, unique indexes are important
--DROP TABLE IF EXISTS dbo.DocSequence;
CREATE TABLE dbo.DocSequence (
    RowID INT NOT NULL IDENTITY(1,1)
        CONSTRAINT PK_DocSequence PRIMARY KEY CLUSTERED,
    BranchCode CHAR(4) NOT NULL,
    DocType CHAR(2) NOT NULL,
    SequenceID INT NOT NULL
        CONSTRAINT DF_DocSequence_SequenceID DEFAULT(1)
        CONSTRAINT CH_DocSequence_SequenceID CHECK (SequenceID BETWEEN 1 AND 999999)
);
CREATE UNIQUE INDEX UQ_DocSequence_BranchCode_DocType
    ON dbo.DocSequence (BranchCode, DocType) INCLUDE (SequenceID);
GO
Procedure:
CREATE OR ALTER PROCEDURE dbo.GenTRNum
    @BranchCode VARCHAR(4),
    @DocType VARCHAR(2),
    --
    @curseq INT = NULL OUTPUT,
    @curtr VARCHAR(30) = NULL OUTPUT
AS
SELECT @curseq = NULL,
       @curtr = NULL,
       @BranchCode = RIGHT(CONCAT('0000', @BranchCode), 4),
       @DocType = RIGHT(CONCAT('00', @DocType), 2)
-- Atomic operation, no transaction needed
UPDATE dbo.DocSequence
SET @curseq = SequenceID += 1
WHERE DocType = @DocType
  AND BranchCode = @BranchCode;
IF @curseq IS NULL -- Not found, create new one
BEGIN
    SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
    BEGIN TRAN
        INSERT dbo.docsequence (doctype, branchcode)
        SELECT @DocType, @BranchCode
        WHERE NOT EXISTS (SELECT 1 FROM dbo.DocSequence WHERE DocType = @DocType AND BranchCode = @BranchCode)
    IF @@ROWCOUNT = 1
    BEGIN
        COMMIT;
        SET @curseq = 1
    END
    ELSE
    BEGIN
        ROLLBACK;
        UPDATE dbo.DocSequence
        SET @curseq = SequenceID += 1
        WHERE DocType = @DocType
          AND BranchCode = @BranchCode;
    END
END
SET @curtr = @BranchCode + @DocType + RIGHT(CONCAT('00000', @curseq), 5)
RETURN
GO
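For reference, a minimal call of the procedure above (the branch code and document type values here are just examples) looks like this:

DECLARE @seq INT, @tr VARCHAR(30);
EXEC dbo.GenTRNum @BranchCode = '0001', @DocType = 'IN',
                  @curseq = @seq OUTPUT, @curtr = @tr OUTPUT;
SELECT @seq AS SequenceID, @tr AS DocNumber;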
I did some tests to make sure it works as described. You can use them if you need to.
-- Log table just for test
-- DROP TABLE IF EXISTS dbo.GenTRNumLog;
CREATE TABLE dbo.GenTRNumLog(
RowID INT NOT NULL IDENTITY(1,1) PRIMARY KEY CLUSTERED,
SPID SMALLINT NOT NULL,
Cycle INT NULL,
dt DATETIME NULL,
sq INT NULL,
tr VARCHAR(30) NULL,
DurationMS INT NULL
)
Open this script in several separate SQL Server Management Studio windows and run them almost simultaneously.
-- Competitive insertion test, run it in 5 threads simultaneously
SET NOCOUNT ON
DECLARE
    @dt DATETIME,
    @start DATETIME,
    @DurationMS INT,
    @Cycle INT,
    @BC VARCHAR(4),
    @DC VARCHAR(2),
    @SQ INT,
    @TR VARCHAR(30);
SELECT @Cycle = 0,
       @start = GETDATE();
WHILE DATEADD(SECOND, 60, @start) > GETDATE() -- one-minute test, new @DocType every second
BEGIN
    SET @dt = GETDATE();
    SELECT @BC = FORMAT(@dt, 'HHmm'), -- hours + minutes as @BranchCode
           @DC = FORMAT(@dt, 'ss'),   -- seconds as @DocType
           @Cycle += 1
    EXEC dbo.GenTRNum @BranchCode = @BC, @DocType = @DC, @curseq = @SQ OUTPUT, @curtr = @TR OUTPUT
    SET @DurationMS = DATEDIFF(ms, @dt, GETDATE());
    INSERT INTO dbo.GenTRNumLog (SPID, Cycle, dt, sq, tr, DurationMS)
    SELECT SPID = @@SPID, Cycle = @Cycle, dt = @dt, sq = @SQ, tr = @TR, DurationMS = @DurationMS
END
/*
Check test results
SELECT *
FROM dbo.DocSequence
SELECT sq = MAX(sq), DurationMS = MAX(DurationMS)
FROM dbo.GenTRNumLog
SELECT * FROM dbo.GenTRNumLog
ORDER BY tr
*/

How to insert multiple rows into a table based on a range of numbers

I have to insert a specific number of rows into a SQL Server table.
DECLARE @val AS INT = 20,
        @val2 AS VARCHAR(50),
        @Date AS DATETIME = CONVERT(DATETIME, '02-05-2016'),
        @i AS INT = 0
SET @val2 = 'abc'
DECLARE @tbl TABLE
(
    [ID] [int] IDENTITY(1,1) NOT NULL,
    [val2] VARCHAR(50) NULL,
    [datum] [datetime] NULL
)
--INSERT INTO @tbl
SELECT @val2, DATEADD(DAY, @i, @Date)
UNION ALL
SELECT @val2, DATEADD(DAY, @i, @Date)
In this query, I have to insert dates starting from a given date, with the number of rows given by the variable '@val'. So in this case 20 rows need to be inserted into the table, starting from '02-05-2016' and increasing the date by one day for each row.
How can I do it in a single statement without any looping or multiple insert statements?
You can use a numbers table if you have one, or create one of your own; master.dbo.spt_values will do if values up to 2048 are enough. In this case, you could use master.dbo.spt_values:
DECLARE @val AS INT = 20, @val2 AS VARCHAR(50);
DECLARE @Date AS DATETIME = CONVERT(DATETIME, '02-05-2016');
SET @val2 = 'abc'
INSERT INTO dbo.YourTable
SELECT @val2, DATEADD(DAY, number, @Date)
FROM master.dbo.spt_values
WHERE type = 'P'
  AND number <= @val;
Though since number starts at zero, you'll get 21 rows as a result (a small tweak, shown below, gives exactly 20).
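If exactly @val rows are wanted, changing the comparison to a strict less-than (my adjustment, reusing the declarations above) is enough:

INSERT INTO dbo.YourTable
SELECT @val2, DATEADD(DAY, number, @Date)
FROM master.dbo.spt_values
WHERE type = 'P'
  AND number < @val;  -- number runs 0..19, so exactly @val rows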
Besides the detailed answer I pointed to in my comment, this is the idea in short:
DECLARE @start INT = 0;
DECLARE @end INT = 19; -- 0 to 19 are 20 days
DECLARE @StartDate DATE = {d'2016-01-01'};
-- Create a list of up to 1,000,000,000 rows on the fly,
-- limited by the start and end parameters
;WITH x AS (SELECT 1 AS N FROM (VALUES(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS tbl(N)) -- 10^1
,N3 AS (SELECT 1 AS N FROM x CROSS JOIN x AS N2 CROSS JOIN x N3) -- 10^3
,Tally AS (SELECT TOP(@end - @start + 1) ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) + @start - 1 AS Nr
           FROM N3 CROSS JOIN N3 N6 CROSS JOIN N3 AS N9)
--INSERT INTO your_table
SELECT @val2 -- your @val2 here as a constant value
     , DATEADD(DAY, Nr, @StartDate)
FROM Tally
You could use a recursive CTE.
DECLARE @i INT = 1
      , @m INT = 19
      , @d DATETIME2 = '2016-05-02';
WITH i AS (
    SELECT 0 AS increment
    UNION ALL
    SELECT i.increment + @i
    FROM i
    WHERE i.increment < @m
)
SELECT i.increment
     , DATEADD(DAY, i.increment, @d)
FROM i
OPTION (MAXRECURSION 100);
Note the OPTION (MAXRECURSION 100) hint at the bottom, which is not strictly necessary, but I have included it to illustrate how it works. By default there is a limit of 100 recursions with this method, so without the hint, a large @m (e.g. 1000) would make SQL Server raise an error. You can set the limit to 0, which means unbounded, but only do that after testing your code, because it can get stuck in an infinite loop that way (which is why the limit exists by default).
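For example, a variant of the same query (my adaptation of the code above) that generates 1000 rows with the limit removed:

DECLARE @i INT = 1
      , @m INT = 999
      , @d DATETIME2 = '2016-05-02';
WITH i AS (
    SELECT 0 AS increment
    UNION ALL
    SELECT i.increment + @i
    FROM i
    WHERE i.increment < @m
)
SELECT i.increment
     , DATEADD(DAY, i.increment, @d)
FROM i
OPTION (MAXRECURSION 0);  -- 0 = no recursion limit; use with care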

Check Constraint not firing correctly

I'm pretty sure I am doing something wrong, as this is my first check constraint, but I can't understand why it's not working. I need to check that a date range doesn't overlap.
ALTER FUNCTION fn_DateOverlaps (@StartDate DATE, @EndDate DATE, @ProjectID INT)
RETURNS BIT
AS
BEGIN
    DECLARE @Ret BIT
    SET @Ret = 1
    IF NOT EXISTS(
        SELECT * FROM project_sprint
        WHERE ((@StartDate >= StartDate AND @EndDate <= EndDate)
            OR (@StartDate <= StartDate AND @EndDate >= EndDate))
          AND ProjectId = @ProjectID
    )
    BEGIN
        SET @Ret = 0
    END
    RETURN @Ret
END
GO
I then apply this to my table:
ALTER TABLE Project_Sprint WITH CHECK ADD CONSTRAINT ck_DateOverlaps CHECK (dbo.fn_DateOverlaps([StartDate], [EndDate], [ProjectId])=1)
GO
When I test the function, I get a good result:
SELECT dbo.fn_DateOverlaps('2013-06-10', '2013-06-13', 1)
But then when I apply the same date range and project ID to my table, it allows the insert. It should fail it.
What am I doing wrong?
If you change SELECT * FROM project_sprint to SELECT * FROM dbo.project_sprint, the function will evaluate correctly, but you still won't get the desired behavior, because the row you are inserting or editing will itself be found by the check.
To prevent this, add an ID column and exclude the row being inserted or edited from the check.
Create Table Project_Sprint (ID int IDENTITY(1,1) NOT NULL, ProjectID int, StartDate DateTime, EndDate DateTime)
go
Alter FUNCTION fn_DateOverlaps (@ID int, @StartDate DATE, @EndDate DATE, @ProjectID INT)
RETURNS BIT
AS
BEGIN
    DECLARE @Ret BIT
    SET @Ret = 0
    IF NOT EXISTS(
        SELECT * FROM dbo.project_sprint
        WHERE
            @ID <> ID AND
            ((@StartDate >= StartDate AND @EndDate <= EndDate)
             OR (@StartDate <= StartDate AND @EndDate >= EndDate))
            AND ProjectId = @ProjectID
    )
    BEGIN
        SET @Ret = 1
    END
    RETURN @Ret
END
GO
ALTER TABLE Project_Sprint ADD CONSTRAINT ck_DateOverlaps CHECK (dbo.fn_DateOverlaps([ID], [StartDate], [EndDate], [ProjectId]) = 1)
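A quick sanity check of the constraint (sample values of my own choosing):

-- First sprint for project 1 is accepted
INSERT INTO Project_Sprint (ProjectID, StartDate, EndDate)
VALUES (1, '2013-06-10', '2013-06-13');

-- A range contained inside the first one for the same project now fails ck_DateOverlaps
INSERT INTO Project_Sprint (ProjectID, StartDate, EndDate)
VALUES (1, '2013-06-11', '2013-06-12');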

SQL Server OUTPUT clause

I am a little stuck on why I cannot seem to get the 'new identity' of the inserted row with the statement below. SCOPE_IDENTITY() just returns null.
declare @WorkRequestQueueID int
declare @LastException nvarchar(MAX)
set @WorkRequestQueueID = 1
set @LastException = 'test'
set nocount off
DELETE dbo.WorkRequestQueue
OUTPUT
    DELETED.MessageEnvelope,
    DELETED.Attempts,
    @LastException,
    GetUtcdate(), -- WorkItemPoisened datetime
    DELETED.WorkItemReceived_UTC
INTO dbo.FaildMessages
FROM dbo.WorkRequestQueue
WHERE
    WorkRequestQueue.ID = @WorkRequestQueueID
IF @@ROWCOUNT = 0
    RAISERROR ('Record not found', 16, 1)
SELECT Cast(SCOPE_IDENTITY() as int)
Any assistance would be most appreciated.
For now I use a workaround like this:
declare @WorkRequestQueueID int
declare @LastException nvarchar(MAX)
set @WorkRequestQueueID = 7
set @LastException = 'test'
set nocount on
set xact_abort on
DECLARE @Failed TABLE
(
    MessageEnvelope xml,
    Attempts smallint,
    LastException nvarchar(max),
    WorkItemPoisened_UTC datetime,
    WorkItemReceived_UTC datetime
)
BEGIN TRAN
DELETE dbo.WorkRequestQueue
OUTPUT
    DELETED.MessageEnvelope,
    DELETED.Attempts,
    @LastException,
    GetUtcdate(), -- WorkItemPoisened datetime
    DELETED.WorkItemReceived_UTC
INTO
    @Failed
FROM
    dbo.WorkRequestQueue
WHERE
    WorkRequestQueue.ID = @WorkRequestQueueID
IF @@ROWCOUNT = 0 BEGIN
    RAISERROR ('Record not found', 16, 1)
    Rollback
END ELSE BEGIN
    insert into dbo.FaildMessages select * from @Failed
    COMMIT TRAN
    SELECT Cast(SCOPE_IDENTITY() as int)
END
EDITED FEB'2013
@MartinSmith alerts us that Microsoft has decided not to fix this bug:
"Posted by Microsoft on 2/27/2013 at 2:18 PM Hello Martin, We
investigated the issue and found that changing the behavior is not an
easy thing to do. It would basically require redefining some of the
behavior when both INSERT & OUTPUT INTO target has identity columns.
Given the nature of the problem & the uncommon scenario, we have
decided not to fix the issue. -- Umachandar, SQL Programmability
Team"
EDITED OCT'2012
This is caused by a bug.
Testing the bug:
Quoting the OUTPUT clause documentation:
"@@IDENTITY, SCOPE_IDENTITY, and IDENT_CURRENT return identity values generated only by the nested DML statement, and not those generated by the outer INSERT statement."
After testing it, it seems that SCOPE_IDENTITY() only works if the outer operation is an insert into a table with an identity column:
Test 1: Delete
create table #t ( a char(1) );
create table #d ( a char(1), i int identity );
insert into #t
values ('a'),('b'),('c');
delete #t
output deleted.a into #d;
select SCOPE_IDENTITY(), * from #d;
         a  i
-------  -  -
NULL     a  1
NULL     b  2
NULL     c  3
Test 2: Inserting in outer table with identity
create table #t ( a char(1), i int identity );
create table #d ( a char(1), i int identity );
insert into #t
values ('x'),('x'),('x');
insert into #t
output inserted.a into #d
values ('a'),('b');
select scope_identity(), * from #d;
   a  i
-  -  -
2  a  1
2  b  2
Test 3: Inserting in outer table without identity
create table #t ( a char(1) );
create table #d ( a char(1), i int identity );
insert into #t
values ('x'),('x'),('x');
insert into #t
output inserted.a into #d
values ('a'),('b');
select scope_identity(), * from #d;
      a  i
----  -  -
NULL  a  1
NULL  b  2
You might try to use a table variable for your output clause, thus allowing you to explicitly insert into FaildMessages:
declare @WorkRequestQueueID int
declare @LastException nvarchar(MAX)
set @WorkRequestQueueID = 1
set @LastException = 'test'
set nocount off
-- Declare a table variable to capture output
DECLARE @output TABLE (
    MessageEnvelope VARCHAR(50), -- Guessing at datatypes
    Attempts INT, -- Guessing at datatypes
    WorkItemReceived_UTC DATETIME -- Guessing at datatypes
)
-- Run the deletion with output
DELETE dbo.WorkRequestQueue
OUTPUT
    DELETED.MessageEnvelope,
    DELETED.Attempts,
    DELETED.WorkItemReceived_UTC
-- Use the table var
INTO @output
FROM dbo.WorkRequestQueue
WHERE
    WorkRequestQueue.ID = @WorkRequestQueueID
-- Explicitly insert
INSERT
INTO dbo.FaildMessages
SELECT
    MessageEnvelope,
    Attempts,
    @LastException,
    GetUtcdate(), -- WorkItemPoisened datetime
    WorkItemReceived_UTC
FROM @output
IF @@ROWCOUNT = 0
    RAISERROR ('Record not found', 16, 1)
SELECT Cast(SCOPE_IDENTITY() as int)

How to expand rows from count in tsql only

I have a table that contains a number and a range value. For instance, one column has the value 40 and the other has the value 100, meaning that starting at 40 the range has 100 values, ending at 139 inclusive of 40. I want to write a T-SQL statement that expands my data into individual rows.
I think I need a cte for this but do not know how I can achieve this.
Note: when expanded I am expecting 7m rows.
If you want CTE here is an example:
Initial insert:
insert into rangeTable (StartValue, RangeValue)
select 40,100
union all select 150,10
go
the query:
with r_CTE (startVal, rangeVal, generatedVal)
as
(
select r.startValue, r.rangeValue, r.startValue
from rangeTable r
union all
select r.startValue, r.rangeValue, generatedVal+1
from rangeTable r
inner join r_CTE rc
on r.startValue = rc.startVal
and r.rangeValue = rc.rangeVal
and r.startValue + r.rangeValue > rc.generatedVal + 1
)
select * from r_CTE
order by startVal, rangeVal, generatedVal
Just be aware that the default maximum number of recursions is 100. You can change it to the maximum of 32767 by adding
option (maxrecursion 32767)
or remove the limit entirely with
option (maxrecursion 0)
See BOL for details
I don't know how this could be done with common table expressions, but here is a solution using a temporary table:
SET NOCOUNT ON
DECLARE @MaxValue INT
SELECT @MaxValue = max(StartValue + RangeValue) FROM MyTable
DECLARE @Numbers table (
    Number INT IDENTITY(1,1) PRIMARY KEY
)
INSERT @Numbers DEFAULT VALUES
WHILE COALESCE(SCOPE_IDENTITY(), 0) <= @MaxValue
    INSERT @Numbers DEFAULT VALUES
SELECT n.Number
FROM @Numbers n
WHERE EXISTS(
    SELECT *
    FROM MyTable t
    WHERE n.Number BETWEEN t.StartValue AND t.StartValue + t.RangeValue - 1
)
SET NOCOUNT OFF
This could be optimized if the numbers table were a regular table, so you don't have to fill the table variable on every call (see the sketch below).
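A sketch of that optimization (the name dbo.Numbers and the row count are my assumptions; size it to cover your maximum value):

-- One-time setup: persist a numbers table instead of refilling a table variable per call
CREATE TABLE dbo.Numbers (Number INT NOT NULL PRIMARY KEY);

INSERT INTO dbo.Numbers (Number)
SELECT TOP (1000000) ROW_NUMBER() OVER (ORDER BY (SELECT NULL))
FROM sys.all_objects a CROSS JOIN sys.all_objects b;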
you could try this approach:
create function [dbo].[fRange](@a int, @b int)
returns @ret table (val int)
as
begin
    declare @val int
    declare @end int
    set @val = @a
    set @end = @a + @b
    while @val < @end
    begin
        insert into @ret(val)
        select @val
        set @val = @val + 1
    end
    return
end
go
declare @ranges table(start int, noOfEntries int)
insert into @ranges (start, noOfEntries)
select 40, 100
union all select 150, 10
select * from @ranges r
cross apply dbo.fRange(start, noOfEntries) fr
Not the fastest, but it should work.
I would do something slightly different from splattne...
SET NOCOUNT ON
DECLARE @MaxValue INT
DECLARE @Numbers table (
    Number INT IDENTITY(1,1) PRIMARY KEY CLUSTERED
)
SELECT @MaxValue = max(RangeValue) FROM MyTable
INSERT @Numbers DEFAULT VALUES
WHILE COALESCE(SCOPE_IDENTITY(), 0) <= @MaxValue
    INSERT @Numbers DEFAULT VALUES
SELECT
    t.StartValue + n.Number - 1 -- Number starts at 1, so subtract 1 to include StartValue itself
FROM
    MyTable t
INNER JOIN
    @Numbers n
ON n.Number <= t.RangeValue
SET NOCOUNT OFF
This will minimise the number of rows you need to insert into the table variable, then use a join to 'multiply' one table by the other...
By the nature of the query, the source table doesn't need indexing, but the "numbers" table should have an index (or primary key). Clustered indexes refer to how rows are stored on disk, so I can't see CLUSTERED being relevant here, but I left it in as I just copied from Splattne.
(Large joins like this may be slow, but still much faster than millions of inserts.)
