I have a script that works fine in SQL Server, but now I need to make it work in Oracle and I am having trouble converting it.
Here is my SQL Server Script:
-- run the commented out statement in another query analyzer window to stop the script
-- update ##stopsign set val = 1
set nocount on
--declare variables
declare @morework int
declare @archivecount int
declare @stopsign int
--if working tables exists clear them, if not create and initialize them
if (object_id('tempdb..##stopsign') is null)
create table ##stopsign (val int)
else
delete from ##stopsign
insert into ##stopsign values (0)
if (object_id('tempdb..#tempdins') is null)
create table #tempdins (tempdin varchar(255) not null, processed int null)
else
delete from #tempdins
--initialize #tempdins working table with all the records eligible to be unarchived
--edit the select statement if needed to change the records to be unarchived
insert into #tempdins(tempdin)
select tempdin
from document
where archivestatus = 'C'
and status = 'U'
option (MAXDOP 1)
--initialize variables with current values
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
select @morework = 1
--while there is more to do, unarchs table has less than 1000 records, and the stopsign value is 0, loop
while (@morework >= 1 and @stopsign = 0)
begin
if @archivecount < 1000
begin
-- number to be processed at once
-- change this value if you would like to dump more in to the unarchs table at once
set rowcount 100
update #tempdins
set processed = 0
where processed is null
--reset rowcount
set rowcount 0
--populate the unarchs table with valid values
--this will unarchive at the page (lowest) level
insert into unarchs (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from document
where tempdin in (select tempdin
from #tempdins
where processed = 0)
--update with rowcount to see if finished
select @morework = @@rowcount
--set the tempdins to processed in working table
update #tempdins
set processed = 1
where processed = 0
--get new counts for variables for evaluation
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from ##stopsign)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
else
begin
--get new counts for variables for evaluation
select @archivecount = (select count(*) from unarchs)
select @stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
end
set nocount off
Here is what I have for ORACLE so far (writing in PL/SQL):
-- run the commented out statement in another query analyzer window to stop the script
-- update ##stopsign set val = 1
--if working tables exists clear them, if not create and initialize them
declare
v_sql LONG;
begin
v_sql:='CREATE GLOBAL TEMPORARY TABLE STOPSIGN;
(
VAL int
)';
execute immediate v_sql;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -955 THEN
NULL; -- suppresses ORA-00955 exception
ELSE
delete from STOPSIGN;
END IF;
END;
/
insert into STOPSIGN values (0);
--if working tables exists clear them, if not create and initialize them
declare
v_sql LONG;
begin
v_sql:='CREATE GLOBAL TEMPORARY TABLE TEMPDINS;
(
tempdin varch(255),
processed int null
)';
execute immediate v_sql;
EXCEPTION
WHEN OTHERS THEN
IF SQLCODE = -955 THEN
NULL; -- suppresses ORA-00955 exception
ELSE
delete from TEMPDINS;
END IF;
END;
/
--initialize #tempdins working table with all the records eligible to be unarchived
--edit the select statement if needed to change the records to be unarchived
insert into TEMPDINS(tempdin)
select * from (select tempdin
from document join packtype on packtype.packagetype=document.packagetype
and archivestatus = 'C' and ADRIVE is not null
and ADRIVE <> DRIVE ) where ROWNUM < 10;
--initialize variables with current values
Declare
archivecount int;
stopsign int;
morework int;
Begin
Select count(*) INTO archivecount from UNARCHS;
Select VAL into stopsign from STOPSIGN;
morework := 1;
END
--while there is more to do, unarchs table has less then 1000 records, and the stopsign value is 0 loop
WHILE morework > 0 and stopsign = 0
LOOP{
begin
if archivecount <1000
begin
-- number to be processed at once
-- change this value if you would like to dump more in to the unarchs table at once
set rowcount 100
update TEMPDINS
set processed = 0
where processed is null
}
--reset rowcount
set rowcount 0
--populate the unarchs table with valid values
--this will unarchive at the page (lowest) level
insert into UNARCHS (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from DOCUMENT
where tempdin in (select tempdin
from TEMPDINS
where processed = 0)
--update with rowcount to see if finished
select morework = select NUM_ROWS into morework from user_tables where table_name = 'UNARCHS'
--set the tempdins to processed in working table
update TEMPDINS
set processed = 1
where processed = 0
--get new counts for variables for evaluation
select archivecount = (select count(*) from unarchs)
select stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
else
begin
--get new counts for variables for evaluation
select archivecount = (select count(*) from unarchs)
select stopsign = (select val from STOPSIGN)
--wait a second so the CPU doesn't spin
waitfor delay '00:00:01'
end
end
End
END IF
END LOOP
Any help would be appreciated. My company has no Oracle resources for me to go to and Google is getting tired of me.
I think you can get rid of everything and just do a single insert statement:
insert into unarchs (drawer,foldernumber,packageid,docid,pagenumber,unarchtype,unarchdate,unarchtime,userid,unarchdays)
select distinct drawer,foldernumber,packageid,docid,pagenumber,'Page','20061128','12:00:00','ADMIN',360
from DOCUMENT
where archivestatus = 'C'
and status = 'U';
I’m assuming you have a different process that updates the rows in the document table so this process doesn’t constantly pick up the same rows?
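If you do end up needing the batched loop behavior in Oracle (for example, to keep unarchs under 1000 rows while another process drains it), here is a minimal PL/SQL sketch. It assumes the same table names as your script, that STOPSIGN holds exactly one row, and that you can execute DBMS_LOCK; treat it as a starting point rather than a drop-in conversion:
-- Sketch only: PL/SQL version of the batching loop.
-- Note: rows in a GLOBAL TEMPORARY table are private to the session,
-- so a permanent table works better as a cross-session stop flag.
DECLARE
  v_morework     PLS_INTEGER := 1;
  v_stopsign     PLS_INTEGER := 0;
  v_archivecount PLS_INTEGER := 0;
BEGIN
  WHILE v_morework > 0 AND v_stopsign = 0 LOOP
    SELECT COUNT(*) INTO v_archivecount FROM unarchs;
    IF v_archivecount < 1000 THEN
      -- SET ROWCOUNT has no Oracle equivalent; ROWNUM limits the batch instead
      UPDATE tempdins
         SET processed = 0
       WHERE processed IS NULL
         AND ROWNUM <= 100;
      INSERT INTO unarchs (drawer, foldernumber, packageid, docid, pagenumber,
                           unarchtype, unarchdate, unarchtime, userid, unarchdays)
      SELECT DISTINCT drawer, foldernumber, packageid, docid, pagenumber,
             'Page', '20061128', '12:00:00', 'ADMIN', 360
        FROM document
       WHERE tempdin IN (SELECT tempdin FROM tempdins WHERE processed = 0);
      -- SQL%ROWCOUNT plays the role of @@ROWCOUNT
      v_morework := SQL%ROWCOUNT;
      UPDATE tempdins SET processed = 1 WHERE processed = 0;
      COMMIT;
    END IF;
    SELECT val INTO v_stopsign FROM stopsign;
    DBMS_LOCK.SLEEP(1); -- replaces WAITFOR DELAY '00:00:01'
  END LOOP;
END;
/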
I have the code below that should insert records into the table, but unfortunately it does not work when multiple records are inserted, updated, or deleted. How should I rewrite the procedure code to loop through all the inserted/deleted records? And I do need to use that stored procedure with input parameters (not just a simple insert into ... select ... from ...).
IF EXISTS (SELECT * FROM MyDB.sys.triggers WHERE object_id = OBJECT_ID(N'[dbo].[MyTable_DEL_UPD_INS]'))
DROP TRIGGER [dbo].[MyTable_DEL_UPD_INS]
GO
CREATE TRIGGER [dbo].[MyTable_DEL_UPD_INS]
ON [MyDB].[dbo].[MyTable]
AFTER DELETE, UPDATE, INSERT
NOT FOR REPLICATION
AS
BEGIN
DECLARE @PKId INT,
@Code VARCHAR(5),
@AuditType VARCHAR(10)
SET @Code = 'TEST'
IF EXISTS (SELECT * FROM deleted d)
AND NOT EXISTS (SELECT * FROM inserted i)
BEGIN
SELECT TOP 1
@PKId = d.[MyTable_PK],
@AuditType = 'DELETE'
FROM
deleted d WITH (NOLOCK)
IF @PKId IS NOT NULL
AND @Code IS NOT NULL
EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
IF EXISTS (SELECT * FROM deleted d)
AND EXISTS (SELECT * FROM inserted i)
BEGIN
SELECT TOP 1
@PKId = d.[MyTable_PK],
@AuditType = 'UPDATE'
FROM
deleted d WITH (NOLOCK)
IF @PKId IS NOT NULL
AND @Code IS NOT NULL
EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
IF NOT EXISTS (SELECT * FROM deleted d)
AND EXISTS (SELECT * FROM inserted i)
BEGIN
SELECT TOP 1
@PKId = i.[MyTable_PK],
@AuditType = 'INSERT'
FROM
inserted i WITH (NOLOCK)
IF @PKId IS NOT NULL
AND @Code IS NOT NULL
EXEC MyDB.[dbo].[SP_Audit] @PKId, @Code, @AuditType
END
END
GO
ALTER TABLE [MyDB].[dbo].[MyTable] ENABLE TRIGGER [MyTable_DEL_UPD_INS]
You should avoid using loops in triggers.
Triggers should be as quick to run as possible, since SQL Server will not return control to whatever statement fired the trigger until the trigger is completed.
So instead of a loop, you should modify your SP_Audit procedure to work with multiple records instead of a single one.
Usually, this is easily done using a table-valued parameter.
If you could post SP_Audit as well, we could give you a complete solution.
Since you didn't post it, you can use these guidelines as a start:
First, you create a user defined table type:
CREATE TYPE dbo.Ids AS TABLE
(
Id int NOT NULL PRIMARY KEY
)
GO
Then, you create the procedure to use it:
CREATE PROCEDURE [dbo].[STP_Audit_MultipleRecords]
(
@IDs dbo.Ids readonly,
@Code CHAR(4),
@AuditType CHAR(6)
)
AS
-- Implementation here
GO
Last, you write your trigger like this:
CREATE TRIGGER [dbo].[MyTable_DEL_UPD_INS]
ON [MyDB].[dbo].[MyTable]
AFTER DELETE, UPDATE, INSERT
NOT FOR REPLICATION
AS
BEGIN
DECLARE @HasDeleted bit = 0,
@HasInserted bit = 0,
@AuditType CHAR(6),
@Code CHAR(4)
SET @Code = 'TEST'
DECLARE @IDs as dbo.Ids
IF EXISTS (SELECT * FROM deleted d)
SET @HasDeleted = 1
IF EXISTS (SELECT * FROM inserted i)
SET @HasInserted = 1
IF @HasDeleted = 1
BEGIN
IF @HasInserted = 1
BEGIN
SET @AuditType = 'UPDATE'
END
ELSE
BEGIN
SET @AuditType = 'DELETE'
END
END
ELSE
IF @HasInserted = 1
BEGIN
SET @AuditType = 'INSERT'
END
INSERT INTO @IDs (Id)
SELECT [MyTable_PK]
FROM inserted
UNION
SELECT [MyTable_PK]
FROM deleted
EXEC [dbo].[STP_Audit_MultipleRecords] @IDs, @Code, @AuditType
END
GO
Notes:
The @HasDeleted and @HasInserted variables are there so each EXISTS query is executed only once per trigger execution.
Getting the primary key values from the deleted and inserted tables is done with a single union query. Since UNION eliminates duplicate values, you can write this query just once. If you want to, you can write a different query for each audit type, but then you would have to repeat the same query three times (with different tables).
I've changed the data types of your @Code and @AuditType variables to char, since they have a fixed length.
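For completeness, here is a minimal sketch of what STP_Audit_MultipleRecords could look like. The audit table name and columns are hypothetical, since the original SP_Audit was not posted; replace them with whatever SP_Audit actually writes to:
CREATE PROCEDURE [dbo].[STP_Audit_MultipleRecords]
(
@IDs dbo.Ids READONLY,
@Code CHAR(4),
@AuditType CHAR(6)
)
AS
BEGIN
SET NOCOUNT ON;
-- dbo.AuditLog and its columns are hypothetical placeholders
INSERT INTO dbo.AuditLog (PKId, Code, AuditType, AuditDate)
SELECT Id, @Code, @AuditType, GETDATE()
FROM @IDs;
END
GO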
I want to update a table in SQL Server by setting a FLAG column to 1 for all values since the beginning of the year:
TABLE
DATE ID FLAG (more columns...)
2016/01/01 1 0 ...
2016/01/01 2 0 ...
2016/01/02 3 0 ...
2016/01/02 4 0 ...
(etc)
Problem is that this table contains hundreds of millions of records and I've been advised to chunk the updates 100,000 rows at a time to avoid blocking other processes.
I need to remember which rows I update because there are background processes which immediately flip the FLAG back to 0 once they're done processing it.
Does anyone have suggestions on how I can do this?
Each day's worth of data has over a million records, so I can't simply loop using the DATE as a counter. I am thinking of using the ID.
Assuming the date column and the ID column are sequential, you could do a simple loop. By this I mean that if there is a record with id=1 and date=2016-1-1, then a record with id=2 and date=2015-12-31 could not exist. If you are worried about locks/exceptions, you should add a transaction in the WHILE block and commit or rollback on failure.
Change the @batchSize to whatever you feel is right after some experimentation.
DECLARE @currentId int, @maxId int, @batchSize int = 10000
SELECT @currentId = MIN(ID), @maxId = MAX(ID) FROM YOURTABLE WHERE DATE >= '2016-01-01'
WHILE @currentId < @maxId
BEGIN
UPDATE YOURTABLE SET FLAG = 1 WHERE ID >= @currentId AND ID < @currentId + @batchSize
SET @currentId = @currentId + @batchSize
END
As the update will never flag the same record twice, I do not see a need to track which records were touched unless you are going to manually stop the process partway through.
You should also ensure that the ID column has an index on it so the retrieval is fast in each update statement.
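If you want the commit-or-rollback safety mentioned above, here is a sketch of the same loop with an explicit transaction per batch (table and column names as in the question); earlier batches stay committed if a later one fails:
DECLARE @currentId int, @maxId int, @batchSize int = 10000
SELECT @currentId = MIN(ID), @maxId = MAX(ID) FROM YOURTABLE WHERE DATE >= '2016-01-01'
WHILE @currentId < @maxId
BEGIN
BEGIN TRY
BEGIN TRANSACTION
UPDATE YOURTABLE SET FLAG = 1 WHERE ID >= @currentId AND ID < @currentId + @batchSize
COMMIT TRANSACTION
END TRY
BEGIN CATCH
-- roll back only the failing batch, then stop (THROW needs SQL Server 2012+)
IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION;
THROW;
END CATCH
SET @currentId = @currentId + @batchSize
END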
Looks like a simple question or maybe I'm missing something.
You can create a temp/permanent table to keep track of updated rows.
create table tbl (Id int) -- or temp table based on your case
insert into tbl values (0)
declare @lastId int = (select Id from tbl)
;with cte as (
select top (100000) Id, Flag
from YourMainTable
where Id > @lastId
ORDER BY Id
)
update cte
set Flag = 1
update tbl set Id = @lastId + 100000
You can run this process in a loop (except the table creation part), as sketched below.
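Looped, that might look roughly like this. It keeps the same bookkeeping as above, so it assumes the Id values are dense (no large gaps); tbl and YourMainTable are the same hypothetical names used in the snippet:
DECLARE @lastId int, @rows int = 1
WHILE @rows > 0
BEGIN
SELECT @lastId = Id FROM tbl
;with cte as (
select top (100000) Id, Flag
from YourMainTable
where Id > @lastId
ORDER BY Id
)
update cte
set Flag = 1
SET @rows = @@ROWCOUNT
-- advance the bookmark; assumes dense Ids, as in the single pass above
UPDATE tbl SET Id = Id + 100000
END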
create table #tmp_table
(
id int,
row_number int
)
insert into #tmp_table
(
id,
row_number
)
--logic to load records from base table
select
bt.id,
row_number() over(order by id) as row_number
from
dbo.base_table bt
where
--ur logic to limit the records
declare @batch_size int = 100000;
declare @start_row_number int,@end_row_number int;
select
@start_row_number = min(row_number),
@end_row_number = max(row_number)
from
#tmp_table
while(@start_row_number < @end_row_number)
begin
update
bt
set
bt.flag = 1
from
dbo.base_table bt
inner join #tmp_table tt on
tt.Id = bt.Id
where
tt.row_number between @start_row_number and (@start_row_number + @batch_size)
set @start_row_number = @start_row_number + @batch_size
end
I've been doing some SQL Server procedure optimization lately and was looking for a testing pattern (time- and result-wise). I've come up with this solution so far:
SET NOCOUNT ON;
----------------------------------------------------------------------------------------------------------------
-- Procedures data and performance testing pattern
----------------------------------------------------------------------------------------------------------------
-- Prepare test queries (most likely will be taken from Logs.ProcedureTraceData (DATAUK/DATAUS servers)
-- Procedures should insert records into Temporary table, so we can compare their results using EXCEPT
-- If result set columns are fixed (i.e. no Dynamic SQL is used), we can create Temporary tables inside script
-- and insert records in them to do comparison and just TRUNCATE them at the end of the loop.
-- example here: http://stackoverflow.com/a/654418/3680098
-- If there're any data discrepancies or record counts are different, it will be displayed in TraceLog table
----------------------------------------------------------------------------------------------------------------
-- Create your own TraceLog table to keep records
----------------------------------------------------------------------------------------------------------------
/*
CREATE TABLE Temporary._EB_TraceLog
(
ID INT NOT NULL IDENTITY(1, 1) CONSTRAINT PK_Temporary_EB_TraceLog_ID PRIMARY KEY
, CurrentExecutionTime INT
, TempExecutionTime INT
, CurrentExecutionResultsCount INT
, TempExecutionResultsCount INT
, IsDifferent BIT CONSTRAINT DF_Temporary_EB_TraceLog_IsDifferent DEFAULT 0 NOT NULL
, TimeDiff AS CurrentExecutionTime - TempExecutionTime
, PercentageDiff AS CAST(((CAST(CurrentExecutionTime AS DECIMAL)/ CAST(TempExecutionTime AS DECIMAL)) * 100 - 100) AS DECIMAL(10, 2))
, TextData NVARCHAR(MAX)
);
SELECT *
FROM Temporary._EB_TraceLog;
TRUNCATE TABLE Temporary._EB_TraceLog;
*/
INSERT INTO Temporary._EB_TraceLog (TextData)
SELECT TextData
FROM Temporary._EB_GetData_Timeouts
EXCEPT
SELECT TextData
FROM Temporary._EB_TraceLog;
DECLARE @Counter INT;
SELECT @Counter = MIN(ID)
FROM Temporary._EB_TraceLog
WHERE CurrentExecutionTime IS NULL
OR TempExecutionTime IS NULL
OR CurrentExecutionResultsCount IS NULL
OR TempExecutionResultsCount IS NULL;
WHILE @Counter <= (SELECT MAX(ID) FROM Temporary._EB_TraceLog)
BEGIN
DECLARE @SQLStringCurr NVARCHAR(MAX);
DECLARE @SQLStringTemp NVARCHAR(MAX);
DECLARE @StartTime DATETIME2;
SELECT @SQLStringCurr = REPLACE(TextData, 'dbo.GetData', 'Temporary._EB_GetData_Orig')
, @SQLStringTemp = REPLACE(TextData, 'dbo.GetData', 'Temporary._EB_GetData_Mod')
FROM Temporary._EB_TraceLog
WHERE ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Drop temporary tables in script, so these numbers don't figure in SP execution time
----------------------------------------------------------------------------------------------------------------
IF OBJECT_ID(N'Temporary._EB_Test_Orig') IS NOT NULL
DROP TABLE Temporary._EB_Test_Orig;
IF OBJECT_ID(N'Temporary._EB_Test_Mod') IS NOT NULL
DROP TABLE Temporary._EB_Test_Mod;
----------------------------------------------------------------------------------------------------------------
-- Actual testing
----------------------------------------------------------------------------------------------------------------
-- Take time snapshot and execute original procedure, which inserts records into Temporary table
-- When done - measurements will be updated on TraceLog table
----------------------------------------------------------------------------------------------------------------
SELECT @StartTime = CURRENT_TIMESTAMP;
EXECUTE sp_executesql @SQLStringCurr;
UPDATE T
SET T.CurrentExecutionTime = DATEDIFF(MILLISECOND, @StartTime, CURRENT_TIMESTAMP)
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Take time snapshot and execute optimized procedure, which inserts records into Temporary table
-- When done - measurements will be updated on TraceLog table
----------------------------------------------------------------------------------------------------------------
SELECT @StartTime = CURRENT_TIMESTAMP;
EXECUTE sp_executesql @SQLStringTemp;
UPDATE T
SET T.TempExecutionTime = DATEDIFF(MILLISECOND, @StartTime, CURRENT_TIMESTAMP)
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Check if there are any data discrepancies
-- If there are any, set IsDifferent to 1, so we can find the root cause
----------------------------------------------------------------------------------------------------------------
IF EXISTS (SELECT * FROM Temporary._EB_Test_Mod EXCEPT SELECT * FROM Temporary._EB_Test_Orig)
OR EXISTS (SELECT * FROM Temporary._EB_Test_Orig EXCEPT SELECT * FROM Temporary._EB_Test_Mod)
BEGIN
UPDATE T
SET T.IsDifferent = 1
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
END
----------------------------------------------------------------------------------------------------------------
-- Update record counts for each execution
-- We can check if there aren't any different record counts even tho results are same
-- EXCEPT clause removes duplicates when doing checks
----------------------------------------------------------------------------------------------------------------
UPDATE T
SET T.CurrentExecutionResultsCount = (SELECT COUNT(*) FROM Temporary._EB_Test_Orig)
, T.TempExecutionResultsCount = (SELECT COUNT(*) FROM Temporary._EB_Test_Mod)
FROM Temporary._EB_TraceLog AS T
WHERE T.ID = @Counter;
----------------------------------------------------------------------------------------------------------------
-- Print iteration number and proceed on next one
----------------------------------------------------------------------------------------------------------------
PRINT @Counter;
SET @Counter += 1;
END
SELECT *
FROM Temporary._EB_TraceLog;
This works quite well so far, but I would like to include IO and TIME statistics in each iteration. Is that possible?
I know I can do it using:
SET STATISTICS IO ON;
SET STATISTICS TIME ON;
But is there a way to grab summed up values and put them in my TraceLog table?
And on top of that, is there anything that doesn't make sense in this piece of code?
Thanks
You can use this query:
SELECT total_elapsed_time
FROM sys.dm_exec_query_stats
WHERE sql_handle in (SELECT most_recent_sql_handle
FROM sys.dm_exec_connections
CROSS APPLY sys.dm_exec_sql_text(most_recent_sql_handle)
WHERE session_id = @@SPID)
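The same DMV also exposes IO counters, so if you want read/write numbers next to the elapsed time, you can widen the select; total_worker_time, total_logical_reads, total_physical_reads, and total_logical_writes are all columns of sys.dm_exec_query_stats:
SELECT total_elapsed_time,
total_worker_time, -- CPU time, in microseconds
total_logical_reads,
total_physical_reads,
total_logical_writes
FROM sys.dm_exec_query_stats
WHERE sql_handle in (SELECT most_recent_sql_handle
FROM sys.dm_exec_connections
CROSS APPLY sys.dm_exec_sql_text(most_recent_sql_handle)
WHERE session_id = @@SPID)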
We have a database with a table called WarehouseItem where products' stock levels are kept. I need to know whenever this table gets updated, so I created a trigger to put the primary key of the updated row into a separate table (like a queue system).
This is my trigger:
IF ((SELECT COUNT(*) FROM sys.triggers WHERE name = 'IC_StockUpdate') > 0)
DROP TRIGGER [dbo].[IC_StockUpdate]
GO
CREATE TRIGGER [dbo].[IC_StockUpdate] ON [dbo].[WarehouseItem]
AFTER UPDATE
AS
BEGIN
-- Get Product Id
DECLARE @StockItemID INT = (SELECT ItemID FROM INSERTED);
DECLARE @WarehouseID INT = (SELECT WarehouseID FROM INSERTED);
-- Proceed If This Product Is Syncable
IF (dbo.IC_CanSyncProduct(@StockItemID) = 1)
BEGIN
-- Proceed If This Warehouse Is Syncable
IF (dbo.IC_CanSyncStock(@WarehouseID) = 1)
BEGIN
-- Check If Product Is Synced
IF ((SELECT COUNT(*) FROM IC_ProductCreateQueue WHERE StockItemID = @StockItemID) > 0)
BEGIN
-- Check If Stock Update Queue Entry Already Exists
IF ((SELECT COUNT(*) FROM IC_StockUpdateQueue WHERE StockItemID = @StockItemID) > 0)
BEGIN
-- Reset [StockUpdate] Queue Entry
UPDATE IC_StockUpdateQueue SET Synced = 0
WHERE StockItemID = @StockItemID;
END
ELSE
BEGIN
-- Insert [StockUpdate] Queue Entry
INSERT INTO IC_StockUpdateQueue (StockItemID, Synced) VALUES
(@StockItemID, 0);
END
END
ELSE
BEGIN
-- Insert [ProductCreate] Queue Entry
INSERT INTO IC_ProductCreateQueue (StockItemID, Synced) VALUES
(@StockItemID, 0);
-- Insert [StockUpdate] Queue Entry
INSERT INTO IC_StockUpdateQueue (StockItemID, Synced) VALUES
(@StockItemID, 0);
END
END
END
END
GO
This works perfectly fine if only a single row is updated in the WarehouseItem table.
However, if more than one row is updated, my trigger fails to handle it.
Is there a way to iterate through the "inserted" collection after a mass update event? Or how does one handle multiple-row updates in a trigger?
You use this:
-- Get Product Id
DECLARE @StockItemID INT = (SELECT ItemID FROM INSERTED);
DECLARE @WarehouseID INT = (SELECT WarehouseID FROM INSERTED);
But if you update multiple rows (as in your sample) you must use a different strategy.
For example, instead of declaring a variable, join to the INSERTED table in each query where you now use your variable.
The IF statements work on your variables, but I think you should move those conditions into the queries.
Try changing your UPDATE query this way (eventually add the conditions of the IFs):
-- Reset [StockUpdate] Queue Entry
UPDATE q SET Synced = 0
FROM IC_StockUpdateQueue q
INNER JOIN inserted i ON i.ItemID = q.StockItemID;
And so on.
For further information, please add a comment.
You could use a loop to iterate over INSERTED, but it may be better to change your scalar variables into a table variable and INSERT-SELECT from INSERTED where the IDs meet the criteria of the first two IFs:
DECLARE @inserted TABLE (StockItemID INT, WarehouseID INT)
INSERT INTO @inserted (StockItemID, WarehouseID)
SELECT ItemID, WarehouseID
FROM INSERTED i
WHERE dbo.IC_CanSyncProduct(i.ItemID)=1
AND dbo.IC_CanSyncStock(i.WarehouseID)=1
Then you can remove the if/else upsert logic and use queries that further filter @inserted for the various updates and inserts that are required:
;WITH ResetQueueEntry AS
(
SELECT StockItemID
FROM @inserted i
WHERE EXISTS(SELECT 1 FROM IC_ProductCreateQueue q WHERE q.StockItemID = i.StockItemID)
AND EXISTS(SELECT 1 FROM IC_StockUpdateQueue q WHERE q.StockItemID = i.StockItemID)
)
-- Reset [StockUpdate] Queue Entry
UPDATE IC_StockUpdateQueue
SET Synced = 0
WHERE StockItemID IN (SELECT StockItemID FROM ResetQueueEntry);
;WITH InsertQueueEntry AS
(
SELECT StockItemID, 0 AS Synced
FROM @inserted i
WHERE EXISTS(SELECT 1 FROM IC_ProductCreateQueue q WHERE q.StockItemID = i.StockItemID)
AND NOT EXISTS(SELECT 1 FROM IC_StockUpdateQueue q WHERE q.StockItemID = i.StockItemID)
)
-- Insert [StockUpdate] Queue Entry
INSERT INTO IC_StockUpdateQueue (StockItemID, Synced)
SELECT StockItemID, Synced
FROM InsertQueueEntry;
;WITH CreateProductEntry AS
(
SELECT StockItemID, 0 AS Synced
FROM @inserted i
WHERE NOT EXISTS(SELECT 1 FROM IC_ProductCreateQueue q WHERE q.StockItemID = i.StockItemID)
)
-- Insert [ProductCreate] Queue Entry
INSERT INTO IC_ProductCreateQueue (StockItemID, Synced)
SELECT StockItemID, Synced
FROM CreateProductEntry;
;WITH CreateStockEntry AS
(
SELECT StockItemID, 0 AS Synced
FROM @inserted i
-- check against the stock queue here: the previous statement has already
-- added these items to IC_ProductCreateQueue
WHERE NOT EXISTS(SELECT 1 FROM IC_StockUpdateQueue q WHERE q.StockItemID = i.StockItemID)
)
-- Insert [StockUpdate] Queue Entry
INSERT INTO IC_StockUpdateQueue (StockItemID, Synced)
SELECT StockItemID, Synced
FROM CreateStockEntry;
In case the trigger is for INSERT, UPDATE, this code will exit the trigger if records are being updated and more than one record is being affected:
IF (SELECT COUNT(*) FROM Deleted) > 1
BEGIN
Return
END
But if you wish to examine every record in the INSERTED recordset you can use this method:
-- declare the fetch targets first (types assumed here; match your columns)
DECLARE @TaskActionId INT, @TaskCustomerId INT, @ASTQRId INT,
@ExistingQRcode VARCHAR(50), @NewQRcode VARCHAR(50)
DECLARE rstAST CURSOR FOR
SELECT ins.TaskActionId,
_Task.CustomerId,
_AST.ASTQRId,
ins.ExistingQRcode,
ins.NewQRcode
FROM Inserted ins INNER JOIN
dbo.cdn_AST _AST ON ins.ASTId = _AST.ASTId INNER JOIN
dbo.tsk_Task _Task ON ins.TaskId = _Task.TaskId
OPEN rstAST
FETCH NEXT FROM rstAST INTO @TaskActionId, @TaskCustomerId, @ASTQRId, @ExistingQRcode, @NewQRcode
WHILE @@FETCH_STATUS = 0
BEGIN
--use CONTINUE to skip next record or let it traverse the loop
FETCH NEXT FROM rstAST INTO @TaskActionId, @TaskCustomerId, @ASTQRId, @ExistingQRcode, @NewQRcode
END
CLOSE rstAST
DEALLOCATE rstAST
I have two tables with overlapping data. One table is about 90% a duplicate of the other. I need to identify the 10% of records that are unique to one table and move them to its parent table. Both of these tables have 400 million+ rows and 300+ columns. The method I am attempting adds a flag field to uniquely identify the records I need to transfer; however, I need to update that field and am struggling with the logic. Below is what I have put together so far, and it causes a never-ending loop. There are no null values in either table.
Declare @counter int
Declare @RowsEffected int
Declare @RowsCnt int
Declare @Err int
SELECT @COUNTER = 1
SELECT @RowsEffected = 0
while (@counter > 0)
begin
set Rowcount 10000000
update Table1
set Existsflg = 1
where exists (
Select Fields
from Table1
Except
Select Fields
from table2 )
Select @RowsCnt = @@ROWCOUNT , @Err = @@ERROR
If @Err <> 0
begin
Print 'Problem Updating the records'
end
IF @RowsCnt = 0
SELECT @COUNTER = 0
ELSE
SELECT @RowsEffected = @RowsEffected + @RowsCnt
PRINT 'The total number of rows effected :'+convert(varchar,@RowsEffected)
WAITFOR DELAY '00:00:10'
END
SET ROWCOUNT 0
Go
Thanks!
This is how I did it one time.
I didn't use ROWCOUNT; I used SELECT TOP (N) and WHILE EXISTS.
My "source" dbo.Employee table was on another server.
GO
USE [$(DestinationDatabaseName)]
GO
/*
READ ME !!!
Replace
$(SourceServer).$(SourceDatabaseName)
With (the Server and DatabaseName of the SOURCE data)
(ex:) [OtherServer].[OtherDatabase]
*/
--SubFolder: SQLReplicateReplacer
print '[uspEmployeeReplicateReplacer]'
go
IF EXISTS (SELECT * FROM sys.objects WHERE object_id = OBJECT_ID(N'[dbo].[uspEmployeeReplicateReplacer]') AND type in (N'P', N'PC'))
DROP PROCEDURE [dbo].[uspEmployeeReplicateReplacer]
Go
/*
declare @numberRowsAffected int
declare @ErrorNumber int
exec [dbo].[uspEmployeeReplicateReplacer] @numberRowsAffected output , @ErrorNumber output
print @numberRowsAffected
print @ErrorNumber
print ''
*/
CREATE PROCEDURE [dbo].[uspEmployeeReplicateReplacer] (
@numberRowsAffected int output --return
,
@ErrorNumber int output
)
AS
SET NOCOUNT ON
select @ErrorNumber = 0
declare @ErrorTracker int
declare @insertRowCount int
declare @updateRowCount int
select @insertRowCount = 0
select @updateRowCount = 0
IF OBJECT_ID('tempdb..#Employeeupdate') IS NOT NULL
begin
drop table #Employeeupdate
end
CREATE TABLE #Employeeupdate (
EmployeeKeyID int IDENTITY (1,1),
EmployeeUUID uniqueidentifier,
EmployeeLabel varchar(64),
EmployeeDescription varchar(128)
)
declare @ManualReplicationRowCount int
/* I put this value in a stored procedure, so I could change it in one place */
/* EXEC dbo.uspInternalSettingGetManualReplicationRowCount @ManualReplicationRowCount output */
Select @ManualReplicationRowCount = 1000
declare @MaximumLoopCounter int
select @MaximumLoopCounter = 10000
while (@MaximumLoopCounter > 0) and exists
(
Select
TOP 1 null
from [$(SourceServer)].[$(SourceDatabaseName)].dbo.Employee vart with (nolock)
where not exists
(
select null from dbo.Employee with (nolock) -- destinationTable
Where
/*
destinationTable.SOMEUNIQUECOLUMN1 = sourceTable.SOMEUNIQUECOLUMN1
and
destinationTable.SOMEUNIQUECOLUMN2 = sourceTable.SOMEUNIQUECOLUMN2
*/
dbo.Employee.EmployeeUUID = vart.EmployeeUUID
)
)
BEGIN
select @MaximumLoopCounter = @MaximumLoopCounter - 1
DELETE FROM #Employeeupdate
Insert into #Employeeupdate
(
EmployeeUUID,
EmployeeLabel,
EmployeeDescription
)
Select
TOP (@ManualReplicationRowCount)
EmployeeUUID,
EmployeeLabel,
EmployeeDescription
from [$(SourceServer)].[$(SourceDatabaseName)].dbo.Employee vart with (nolock)
where not exists
(
select null from dbo.Employee with (nolock) -- destinationTable
Where
/*
destinationTable.SOMEUNIQUECOLUMN1 = sourceTable.SOMEUNIQUECOLUMN1
and
destinationTable.SOMEUNIQUECOLUMN2 = sourceTable.SOMEUNIQUECOLUMN2
*/
dbo.Employee.EmployeeUUID = vart.EmployeeUUID
)
SET NOCOUNT OFF
Insert into dbo.Employee
(
EmployeeUUID,
EmployeeLabel,
EmployeeDescription
)
Select
EmployeeUUID,
EmployeeLabel,
EmployeeDescription
from
#Employeeupdate
SELECT @insertRowCount = @@ROWCOUNT , @ErrorTracker = @@ERROR
if @ErrorTracker <> 0
BEGIN
select @ErrorNumber = @ErrorTracker
select @MaximumLoopCounter = 0 --Bail Out !!!
END
SET NOCOUNT ON
END --End While Loop
/*
SET NOCOUNT OFF
Update dbo.Employee
Set
--EmployeeUUID = vart.EmployeeUUID,
EmployeeLabel = vart.EmployeeLabel,
EmployeeDescription = vart.EmployeeDescription
From
dbo.Employee with (nolock) , [$(SourceServer)].[$(SourceDatabaseName)].dbo.Employee vart with (nolock)
Where
--Relationship
dbo.Employee.EmployeeUUID = vart.EmployeeUUID
SELECT @updateRowCount = @@ROWCOUNT
SET NOCOUNT ON
*/
SELECT @numberRowsAffected = @insertRowCount + @updateRowCount
print '/#Employeeupdate COUNT/'
print @numberRowsAffected
print '-------------------------'
IF OBJECT_ID('tempdb..#Employeeupdate') IS NOT NULL
begin
drop table #Employeeupdate
end
SET NOCOUNT OFF
GO
GRANT EXECUTE ON dbo.uspEmployeeReplicateReplacer TO $(DBUSERNAME)
GO
I’d suggest you do this in batches of 1M-5M at a time because you have a ton of data to update.
What I would do in this case is (a sketch follows below):
a) add a new column named Processed (bit) that gets updated for all rows that are processed
b) select 1M rows into a temp table (this may not be needed, but it will make things a bit cleaner)
c) insert all non-duplicate records into the other table
d) update the rows and mark them as processed
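A rough sketch of those steps, with hypothetical names throughout (Table1 as the source holding the 10% unique rows, Table2 as the parent, KeyCol as whatever uniquely identifies a record):
-- a) add the tracking column
ALTER TABLE Table1 ADD Processed bit NOT NULL DEFAULT 0;
DECLARE @rows int = 1;
WHILE @rows > 0
BEGIN
-- b) stage one batch of unprocessed rows
SELECT TOP (1000000) *
INTO #batch
FROM Table1
WHERE Processed = 0;
-- c) insert only rows that do not already exist in the parent
-- (assumes identical column layouts; otherwise spell out the column lists)
INSERT INTO Table2
SELECT b.*
FROM #batch b
WHERE NOT EXISTS (SELECT 1 FROM Table2 t WHERE t.KeyCol = b.KeyCol);
-- d) mark the staged batch as processed
UPDATE t1
SET Processed = 1
FROM Table1 t1
INNER JOIN #batch b ON b.KeyCol = t1.KeyCol;
SET @rows = @@ROWCOUNT;
DROP TABLE #batch;
END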