I have this pattern in a number of stored procedures
-- Table1
CREATE TABLE Table1 (
    [id] [int] IDENTITY(1,1) NOT NULL,
    [data] [varchar](512) NULL,
    [count] INT NULL
);
-- 'data' is unique, with a unique index on 'data' in 'Table1':
CREATE UNIQUE INDEX UX_Table1_data ON Table1 ([data]); -- index name illustrative
BEGIN TRY
INSERT INTO Table1 (data, count) SELECT @data, 1;
END TRY
BEGIN CATCH
UPDATE Table1 SET count = count + 1 WHERE data = @data;
END CATCH
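One refinement worth noting before the objections below: the CATCH block as written treats every error as "the row already exists", not just duplicate-key violations. A sketch that narrows it (2601 and 2627 are the duplicate-key error numbers; the RAISERROR message is my own, since SQL Server 2005 has no THROW):
BEGIN CATCH
    IF ERROR_NUMBER() IN (2601, 2627) -- duplicate key in unique index / unique constraint
        UPDATE Table1 SET count = count + 1 WHERE data = @data;
    ELSE
        RAISERROR('Unexpected error in upsert', 16, 1);
END CATCH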
I've been slammed before for using this pattern
You should never have exception "catching" in your normal logic flow. (That's why it's called an "exception": it should be exceptional, i.e. rare.) Put an EXISTS check around your INSERT: IF NOT EXISTS (SELECT NULL FROM Table1 WHERE data = @data) BEGIN /* insert here */ END
However, I can't see a way around it in this instance. Consider the following alternative approaches.
INSERT INTO Table1 (data,count)
SELECT @data, 1 WHERE NOT EXISTS
(SELECT 1 FROM Table1 WHERE data = @data)
If I do this, it means every insert is unique, but I can't 'catch' an update condition.
DECLARE @id INT;
SET @id = (SELECT id FROM Table1 WHERE data = @data)
IF (@id IS NULL)
INSERT INTO Table1 (data, count) SELECT @data, 1;
ELSE
UPDATE Table1 SET count = count + 1 WHERE data = @data;
If I do this, I have a race condition between the check and the insert, so I could have duplicates inserted.
BEGIN TRANSACTION
DECLARE @id INT;
SET @id = (SELECT id FROM Table1 WHERE data = @data)
IF (@id IS NULL)
INSERT INTO Table1 (data, count) SELECT @data, 1;
ELSE
UPDATE Table1 SET count = count + 1 WHERE data = @data;
COMMIT TRANSACTION
If I wrap this in a TRANSACTION it adds more overhead. I know TRY/CATCH also brings overhead, but I think a TRANSACTION adds more - does anyone know?
People keep telling me that using TRY/CATCH in normal app logic is BAD, but won't tell me why
Note: I'm running SQL Server 2005 on at least one box, so I can't use MERGE
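For readers on SQL Server 2008 or later, the whole upsert collapses into a single MERGE statement; a minimal sketch (the HOLDLOCK hint keeps the check-and-act atomic under concurrency):
MERGE Table1 WITH (HOLDLOCK) AS t
USING (SELECT @data AS data) AS s
ON t.data = s.data
WHEN MATCHED THEN
    UPDATE SET count = t.count + 1
WHEN NOT MATCHED THEN
    INSERT (data, count) VALUES (s.data, 1);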
Try to update first, and if that fails (updates no rows), insert the new row.
BEGIN TRANSACTION
UPDATE t
SET
t.count = t.count + 1
FROM Table1 t
WHERE t.data = @data
IF (@@ROWCOUNT = 0)
BEGIN
INSERT INTO Table1
(data, count)
VALUES
(@data, 1)
END
COMMIT TRANSACTION
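As written, this still has a window between the failed UPDATE and the INSERT where another session can slip in and insert the same value. A sketch of the same pattern with locking hints to close that window (the hints are my addition, not part of the original answer):
BEGIN TRANSACTION
UPDATE t
SET t.count = t.count + 1
FROM Table1 t WITH (UPDLOCK, SERIALIZABLE)
WHERE t.data = @data
IF (@@ROWCOUNT = 0)
BEGIN
    INSERT INTO Table1 (data, count) VALUES (@data, 1)
END
COMMIT TRANSACTION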
The explicit transaction is the cost of doing business with a conditional INSERT/UPDATE in order to address concurrency. The example below uses locking hints to avoid a race condition with this code.
BEGIN TRANSACTION;
INSERT INTO Table1
( data
, count
)
SELECT @data
, 1
WHERE NOT EXISTS ( SELECT 1
FROM Table1 WITH ( UPDLOCK, HOLDLOCK )
WHERE data = @data );
IF @@ROWCOUNT = 0
UPDATE Table1
SET count = count + 1
WHERE data = @data;
COMMIT;
If the more common path is the UPDATE, try that first followed by the conditional INSERT.
Related
I have a script that I created that works fine in SQL Server, but now I need to make it work in Oracle and I am having issues getting the script converted.
Here is my SQL Server Script:
-- run the commented out statement in another query analyzer window to stop the script
-- update ##stopsign set val = 1
set nocount on
--declare variables
declare @morework int
declare @archivecount int
declare @stopsign int
--if working tables exists clear them, if not create and initialize them
if (object_id('tempdb..##stopsign') is null)
create table ##stopsign (val int)
else
delete from ##stopsign
insert into ##stopsign values (0)
if (object_id('tempdb..#tempdins') is null)
create table #tempdins (tempdin varchar(255) not null, processed int null)
else
delete from #tempdins
--initialize #tempdins working table with all the records eligible to be unarchived
--edit the select statement if needed to change the records to be unarchived
insert into #tempdins(tempdin)
select tempdin
from document
where archivestatus = 'C'
and status = 'U'
option (MAXDOP 1)
--initialize variables with current values
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
select @morework = 1
--while there is more to do, the unarchs table has fewer than 1000 records, and the stopsign value is 0, loop
while (@morework >= 1 and @stopsign = 0)
begin
if @archivecount < 1000
begin
-- number to be processed at once
-- change this value if you would like to dump more in to the unarchs table at once
set rowcount 100
update #tempdins
set processed = 0
where processed is null
--reset rowcount
set rowcount 0
--populate the unarchs table with valid values
--this will unarchive at the page (lowest) level
insert into unarchs (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from document
where tempdin in (select tempdin
from #tempdins
where processed = 0)
--update with rowcount to see if finished
select @morework = @@rowcount
--set the tempdins to processed in working table
update #tempdins
set processed = 1
where processed = 0
--get new counts for variables for evaluation
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
else
begin
--get new counts for variables for evaluation
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
end
set nocount off
Here is what I have for ORACLE so far (writing in PL/SQL):
-- run the commented out statement in another query analyzer window to stop the script
-- update stopsign set val = 1
--if working tables exists clear them, if not create and initialize them
declare
v_sql LONG;
begin
v_sql:='CREATE GLOBAL TEMPORARY TABLE STOPSIGN
(
VAL int
)';
execute immediate v_sql;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -955 THEN
NULL; -- suppresses ORA-00955 exception
ELSE
delete from STOPSIGN;
END IF;
END;
/
insert into STOPSIGN values (0);
--if working tables exists clear them, if not create and initialize them
declare
v_sql LONG;
begin
v_sql:='CREATE GLOBAL TEMPORARY TABLE TEMPDINS
(
tempdin varchar2(255),
processed int null
)';
execute immediate v_sql;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -955 THEN
NULL; -- suppresses ORA-00955 exception
ELSE
delete from TEMPDINS;
END IF;
END;
/
--initialize #tempdins working table with all the records eligible to be unarchived
--edit the select statement if needed to change the records to be unarchived
insert into TEMPDINS(tempdin)
select * from (select tempdin
from document join packtype on packtype.packagetype=document.packagetype
and archivestatus = 'C' and ADRIVE is not null
and ADRIVE <> DRIVE ) where ROWNUM < 10;
--initialize variables with current values
Declare
archivecount int;
stopsign int;
morework int;
Begin
Select count(*) INTO archivecount from UNARCHS;
Select VAL into stopsign from STOPSIGN;
morework := 1;
END
--while there is more to do, the unarchs table has fewer than 1000 records, and the stopsign value is 0, loop
WHILE morework > 0 and stopsign = 0
LOOP{
begin
if archivecount <1000
begin
-- number to be processed at once
-- change this value if you would like to dump more in to the unarchs table at once
set rowcount 100
update TEMPDINS
set processed = 0
where processed is null
}
--reset rowcount
set rowcount 0
--populate the unarchs table with valid values
--this will unarchive at the page (lowest) level
insert into UNARCHS (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from DOCUMENT
where tempdin in (select tempdin
from TEMPDINS
where processed = 0)
--update with rowcount to see if finished
select morework = select NUM_ROWS into morework from user_tables where table_name = 'UNARCHS'
--set the tempdins to processed in working table
update TEMPDINS
set processed = 1
where processed = 0
--get new counts for variables for evaluation
select archivecount = (select count(*) from unarchs)
select stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
else
begin
--get new counts for variables for evaluation
select archivecount = (select count(*) from unarchs)
select stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
end
End
END IF
END LOOP
Any help would be appreciated. My company has no Oracle resources for me to go to and Google is getting tired of me.
I think you can get rid of everything and just do a single insert statement:
insert into unarchs (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from DOCUMENT
where archivestatus = 'C'
And status = 'U';
I’m assuming you have a different process that updates the rows in the document table so this process doesn’t constantly pick up the same rows?
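If the throttling loop really is needed, here is a minimal PL/SQL sketch of it (table and column names are taken from the question; DBMS_LOCK.SLEEP requires EXECUTE on DBMS_LOCK, the GLOBAL TEMPORARY tables must be created once beforehand rather than inside the script, and ROWNUM stands in for SQL Server's SET ROWCOUNT):
DECLARE
  v_stopsign  NUMBER := 0;
  v_archcount NUMBER;
  v_inserted  NUMBER := 1;
BEGIN
  WHILE v_inserted > 0 AND v_stopsign = 0 LOOP
    SELECT val INTO v_stopsign FROM stopsign;
    SELECT COUNT(*) INTO v_archcount FROM unarchs;
    IF v_archcount < 1000 THEN
      -- mark the next batch of 100 rows as ready to process
      UPDATE tempdins SET processed = 0
      WHERE processed IS NULL AND ROWNUM <= 100;
      INSERT INTO unarchs (drawer, foldernumber, packageid, docid, pagenumber,
                           unarchtype, unarchdate, unarchtime, userid, unarchdays)
      SELECT DISTINCT drawer, foldernumber, packageid, docid, pagenumber,
             'Page', '20061128', '12:00:00', 'ADMIN', 360
      FROM document
      WHERE tempdin IN (SELECT tempdin FROM tempdins WHERE processed = 0);
      v_inserted := SQL%ROWCOUNT;
      UPDATE tempdins SET processed = 1 WHERE processed = 0;
      COMMIT;
    END IF;
    DBMS_LOCK.SLEEP(1);  -- wait a second so the CPU doesn't spin
  END LOOP;
END;
/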
I have the code below that should insert records into the table, but unfortunately it does not work when multiple records are inserted, updated, or deleted at once. How should I rewrite the procedure to loop through all the inserted / deleted records? And I do need to use that stored procedure with input parameters (not just a simple INSERT INTO ... SELECT ... FROM ...).
IF EXISTS (SELECT * FROM MyDB.sys.triggers WHERE object_id = OBJECT_ID(N'[dbo].[MyTable_DEL_UPD_INS]'))
DROP TRIGGER [dbo].[MyTable_DEL_UPD_INS]
GO
CREATE TRIGGER [dbo].[MyTable_DEL_UPD_INS]
ON [MyDB].[dbo].[MyTable]
AFTER DELETE, UPDATE, INSERT
NOT FOR REPLICATION
AS
BEGIN
DECLARE @PKId INT,
        @Code VARCHAR(5),
        @AuditType VARCHAR(10)
SET @Code = 'TEST'
IF EXISTS (SELECT * FROM deleted d)
   AND NOT EXISTS (SELECT * FROM inserted i)
BEGIN
    SELECT TOP 1
        @PKId = d.[MyTable_PK],
        @AuditType = 'DELETE'
    FROM
        deleted d WITH (NOLOCK)
    IF @PKId IS NOT NULL
       AND @Code IS NOT NULL
        EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
IF EXISTS (SELECT * FROM deleted d)
   AND EXISTS (SELECT * FROM inserted i)
BEGIN
    SELECT TOP 1
        @PKId = d.[MyTable_PK],
        @AuditType = 'UPDATE'
    FROM
        deleted d WITH (NOLOCK)
    IF @PKId IS NOT NULL
       AND @Code IS NOT NULL
        EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
IF NOT EXISTS (SELECT * FROM deleted d)
   AND EXISTS (SELECT * FROM inserted i)
BEGIN
    SELECT TOP 1
        @PKId = i.[MyTable_PK],
        @AuditType = 'INSERT'
    FROM
        inserted i WITH (NOLOCK)
    IF @PKId IS NOT NULL
       AND @Code IS NOT NULL
        EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
END
GO
ALTER TABLE [MyDB].[dbo].[MyTable] ENABLE TRIGGER [MyTable_DEL_UPD_INS]
You should avoid using loops in triggers.
Triggers should be as quick to run as possible, since SQL Server will not return control to whatever statement fired the trigger until the trigger has completed.
So instead of a loop, you should modify your SP_Audit procedure to work with multiple records instead of a single one.
Usually, this is easily done using a table-valued parameter.
If you could post the SP_Audit as well, we could give you a complete solution.
Since you didn't post it, you can use these guidelines as a start:
First, you create a user defined table type:
CREATE TYPE dbo.Ids AS TABLE
(
Id int NOT NULL PRIMARY KEY
)
GO
Then, you create the procedure to use it:
CREATE PROCEDURE [dbo].[STP_Audit_MultipleRecords]
(
@IDs dbo.Ids READONLY,
#Code CHAR(4),
#AuditType CHAR(6)
)
AS
-- Implementation here
GO
Last, your write your trigger like this:
CREATE TRIGGER [dbo].[MyTable_DEL_UPD_INS]
ON [MyDB].[dbo].[MyTable]
AFTER DELETE, UPDATE, INSERT
NOT FOR REPLICATION
AS
BEGIN
DECLARE @HasDeleted bit = 0,
        @HasInserted bit = 0,
        @AuditType CHAR(6),
        @Code CHAR(4)
SET @Code = 'TEST'
DECLARE @IDs AS dbo.Ids
IF EXISTS (SELECT * FROM deleted d)
    SET @HasDeleted = 1
IF EXISTS (SELECT * FROM inserted i)
    SET @HasInserted = 1
IF @HasDeleted = 1
BEGIN
    IF @HasInserted = 1
    BEGIN
        SET @AuditType = 'UPDATE'
    END
    ELSE
    BEGIN
        SET @AuditType = 'DELETE'
    END
END
ELSE
    IF @HasInserted = 1
    BEGIN
        SET @AuditType = 'INSERT'
    END
INSERT INTO @IDs (Id)
SELECT [MyTable_PK]
FROM inserted
UNION
SELECT [MyTable_PK]
FROM deleted
EXEC [dbo].[STP_Audit_MultipleRecords] @IDs, @Code, @AuditType
END
GO
Notes:
The @HasDeleted and @HasInserted variables are there so that each EXISTS query is executed only once per trigger invocation.
Getting the primary key values from the deleted and inserted tables is done with a single UNION query. Since UNION eliminates duplicate values, you can write this query just once. If you want to, you can write a different query for each audit type, but then you would have to repeat the same query three times (with different tables).
I've changed the data types of your @Code and @AuditType variables to char, since they have a fixed length.
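For completeness, a minimal sketch of what the set-based procedure body could look like (the AuditLog table and its columns are assumptions on my part, since SP_Audit wasn't posted):
CREATE PROCEDURE [dbo].[STP_Audit_MultipleRecords]
(
    @IDs dbo.Ids READONLY,
    @Code CHAR(4),
    @AuditType CHAR(6)
)
AS
    -- one set-based insert instead of one SP_Audit call per row
    INSERT INTO dbo.AuditLog (RecordId, Code, AuditType, AuditDate)
    SELECT Id, @Code, @AuditType, GETDATE()
    FROM @IDs;
GO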
I have written the trigger below that prevents NULL from being entered in the pch_x field. It works fine if I insert one row, but doesn't work if I insert more than one at once. Could someone please help me out a little? Here is my code:
create trigger test
ON [dbo].TEMP
for INSERT
AS
BEGIN
declare @xcheck varchar(50)
set @xcheck = (select i.pch_x FROM temp L INNER JOIN INSERTED I
               ON L.id = I.id)
IF (@xcheck is NULL)
begin
RAISERROR('NULL in pch_x', 16, 1)
ROLLBACK
end
END
I'm not sure why you're doing this in a trigger, but the set based way to do this test would be to use EXISTS:
create trigger test
ON [dbo].TEMP
for INSERT
AS
BEGIN
IF EXISTS(select * FROM temp L INNER JOIN
INSERTED I
ON L.id = I.id
where i.pch_x IS NULL)
begin
RAISERROR('NULL in pch_x', 16, 1)
ROLLBACK
end
END
I'm also not sure why you're joining back to the table - I'd have thought the check could run without reference to temp:
create trigger test
ON [dbo].TEMP
for INSERT
AS
BEGIN
IF EXISTS(select * FROM INSERTED
where pch_x IS NULL)
begin
RAISERROR('NULL in pch_x', 16, 1)
ROLLBACK
end
END
For your unusual requirement that, in a rowset containing some rows with NULLs, the rows without NULLs succeed while the rows with NULLs fail, the most sensible approach would be an INSTEAD OF trigger:
create trigger test
ON [dbo].TEMP
INSTEAD OF INSERT
AS
BEGIN
declare @rc int
INSERT INTO dbo.temp (/* column list */)
SELECT /* column list */ from inserted where pch_x IS NOT NULL
set @rc = @@ROWCOUNT
IF @rc <> (select COUNT(*) from inserted)
begin
RAISERROR('NULL in pch_x', 16, 1)
--ROLLBACK
end
END
I need to write a single statement to insert or update a record in a single record table
the merge statement allows me to write this:
create table t1 (n int)
-- insert into t1 (n) Values (1); -- uncomment to test the matched branch
MERGE t1 AS P
USING (SELECT 3 AS n) AS S
ON 1 = 1
WHEN MATCHED THEN
UPDATE SET n = S.n
WHEN NOT MATCHED THEN
INSERT (n)
VALUES (S.n);
select * from t1
This works, but I think the purpose of the 1 = 1 condition is not very easy to understand.
Is there a different syntax to insert a record when the table is empty or update the record when it does already exist?
The other option would be to do it the old fashioned way.
if exists (select null from t1)
update t1 set n = 3
else
insert into t1 (n) values (3)
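The same caveat as the upsert patterns above applies: two sessions can pass the EXISTS check together. A sketch with lock hints, in case the statement can run concurrently (the hints are my addition):
BEGIN TRANSACTION
IF EXISTS (SELECT NULL FROM t1 WITH (UPDLOCK, HOLDLOCK))
    UPDATE t1 SET n = 3
ELSE
    INSERT INTO t1 (n) VALUES (3)
COMMIT TRANSACTION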
Replace
ON 1 = 1
with
ON S.n = P.n
Here is an example of a recent procedure I wrote to either update an existing row or insert a new one.
The table variable has the same structure as the MembershipEmailFormat table.
I found it easiest to create a table variable to be the source in the USING clause. I realize that the main purpose of MERGE statements really is merging multiple rows between two tables; my use case is that I need to insert a new email address for a user or modify an existing one.
CREATE PROCEDURE [dbo].[usp_user_merge_emailformat]
    @UserID UNIQUEIDENTIFIER,
    @Email varchar(256),
    @UseHTML bit
AS
BEGIN
    --SELECT @UserID='04EFF187-AEAC-408E-9FA8-284B31890FBD',
    --       @Email='person@xxxx.com',
    --       @UseHTML=0
    DECLARE @temp TABLE
    (
        UserID UNIQUEIDENTIFIER,
        Email varchar(256),
        HtmlFormat bit
    )
    INSERT INTO @temp (UserID, Email, HtmlFormat)
    VALUES (@UserID, @Email, @UseHTML)
    SELECT * FROM @temp
    MERGE dbo.MembershipEmailFormat as t
    USING @temp AS s
ON (t.UserID = s.UserID and t.Email = s.Email)
WHEN MATCHED THEN UPDATE SET t.HtmlFormat = s.HtmlFormat
WHEN NOT MATCHED THEN INSERT VALUES(s.UserID,s.Email,s.HtmlFormat);
END
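A call would look something like this (the GUID is the commented-out test value from the procedure; the email address is a placeholder):
EXEC [dbo].[usp_user_merge_emailformat]
    @UserID  = '04EFF187-AEAC-408E-9FA8-284B31890FBD',
    @Email   = 'person@example.com',
    @UseHTML = 1;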
Consider the following SQL:
CREATE TABLE Foo
(
ID int IDENTITY(1,1),
Data nvarchar(max)
)
INSERT INTO Foo (Data)
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
DECLARE @LastID int
SET @LastID = SCOPE_IDENTITY()
I would like to know if I can depend on the 1000 rows that I inserted into table Foo having contiguous identity values. In other words, if this SQL block produces a @LastID of 2000, can I know for certain that the ID of the first record I inserted was 1001? I am mainly curious about multiple statements inserting records into table Foo concurrently.
I know that I could add a serializable transaction around my insert statement to ensure the behavior that I want, but do I really need to? I'm worried that introducing a serializable transaction will degrade performance, but if SQL Server won't allow other statements to insert into table Foo while this statement is running, then I don't have to worry about it.
I disagree with the accepted answer. This can easily be tested and disproved by running the following.
Setup
USE tempdb
CREATE TABLE Foo
(
ID int IDENTITY(1,1),
Data nvarchar(max)
)
Connection 1
USE tempdb
SET NOCOUNT ON
WHILE NOT EXISTS(SELECT * FROM master..sysprocesses WHERE context_info = CAST('stop' AS VARBINARY(128) ))
BEGIN
INSERT INTO Foo (Data)
VALUES ('blah')
END
Connection 2
USE tempdb
SET NOCOUNT ON
SET CONTEXT_INFO 0x
DECLARE @Output TABLE(ID INT)
WHILE 1 = 1
BEGIN
    /*Clear out table variable from previous loop*/
    DELETE FROM @Output
    /*Insert 1000 records*/
    INSERT INTO Foo (Data)
    OUTPUT inserted.ID INTO @Output
    SELECT TOP 1000 NEWID()
    FROM sys.all_columns
    IF EXISTS(SELECT * FROM @Output HAVING MAX(ID) - MIN(ID) <> 999)
    BEGIN
        /*Set Context Info so the other connection inserting
          a single record in a loop terminates itself*/
        DECLARE @stop VARBINARY(128)
        SET @stop = CAST('stop' AS VARBINARY(128))
        SET CONTEXT_INFO @stop
        /*Return results for inspection*/
        SELECT ID, DENSE_RANK() OVER (ORDER BY Grp) AS ContigSection
        FROM
            (SELECT ID, ID - ROW_NUMBER() OVER (ORDER BY [ID]) AS Grp
             FROM @Output) O
ORDER BY ID
RETURN
END
END
Yes, they will be contiguous because the INSERT is atomic: complete success or full rollback. It is also performed as a single unit of work: you won't get any "interleaving" with other processes.
However (or to put your mind at rest!), consider the OUTPUT clause
DECLARE @KeyStore TABLE (ID int NOT NULL)
INSERT INTO Foo (Data)
OUTPUT INSERTED.ID INTO @KeyStore (ID) --this line
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
If you want the Identity values for multiple rows use OUTPUT:
DECLARE @NewIDs table (PKColumn int)
INSERT INTO Foo (Data)
OUTPUT INSERTED.ID
    INTO @NewIDs (PKColumn)
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
You now have the entire set of values in the @NewIDs table. You can add any columns from the Foo table into the @NewIDs table and insert those columns as well.
It is not good practice to attach any sort of meaning whatsoever to identity values. You should assume that they are nothing more than integers guaranteed to be unique within the scope of your table.
Try adding the following:
option(maxdop 1)
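That is, the hint is appended to the INSERT ... SELECT so the statement gets a serial plan; a sketch (note this only affects behavior within the one statement, and concurrent sessions can still interleave identity ranges unless access is serialized):
INSERT INTO Foo (Data)
SELECT TOP 1000 Data
FROM SomeOtherTable
WHERE SomeColumn = @SomeParameter
OPTION (MAXDOP 1);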