SQL Server - How do I create/COMMIT log records during a running process that will potentially be ROLLED BACK?

I am having much difficulty replicating the logging I had done in Oracle using PRAGMA AUTONOMOUS_TRANSACTION, which allows you to COMMIT records to a log table while NOT committing any other DML operations. I've been banging my head over how more experienced SQL Server developers log the success or errors of their database programs/processes. Basically, how do experienced T-SQL developers log from the middle of an active T-SQL program? Another way to put it: I have a HUGE process that is NOT to be committed until the entire process executes without critical errors, but I still need to log ALL errors; if a critical error occurs, the entire process must be rolled back, yet I still need the logs.
Using MERGE as an example demonstrating my inability to COMMIT some records while ROLLING BACK others...
I have TWO named transactions in the script below (1. sourceData, 2. mainProcess). This is my first attempt at accomplishing this goal. First the script INSERTs records into a table using the 1st transaction, without COMMIT. Then, in transaction 2, the MERGE block INSERTs records into the destination table and the log table, and I COMMIT transaction 2 (mainProcess).
I then ROLL BACK the 1st named transaction (sourceData).
The issue is that EVERYTHING gets rolled back, even though I explicitly committed the mainProcess transaction.
GO
/* temp table to simulate Log Table */
IF OBJECT_ID('tempdb..#LogTable') IS NULL
CREATE TABLE #LogTable
( Action VARCHAR(50),
primaryID INT,
secondaryID INT,
CustomID INT,
Note VARCHAR(200),
ConvDate DATE
) --DROP TABLE IF EXISTS #LogTable;
; /* SELECT * FROM #LogTable; TRUNCATE TABLE #LogTable; */
/* SELECT * FROM #ProductionSrcTable */
IF OBJECT_ID('tempdb..#ProductionSrcTable') IS NULL
CREATE TABLE #ProductionSrcTable( primaryKey INT, contactName VARCHAR(200), sourceKey INT )
; --DROP TABLE IF EXISTS #ProductionSrcTable; TRUNCATE TABLE #ProductionSrcTable;
/* SELECT * FROM #ProductionDestTable */
IF OBJECT_ID('tempdb..#ProductionDestTable') IS NULL
CREATE TABLE #ProductionDestTable( primaryKey INT, contactName VARCHAR(200), secondaryKey INT )
; --DROP TABLE IF EXISTS #ProductionDestTable; TRUNCATE TABLE #ProductionDestTable;
GO
/* Insert some fake data into Source Table */
BEGIN TRAN sourceData
BEGIN TRY
INSERT INTO #ProductionSrcTable
SELECT 1001 AS primaryKey, 'Jason' AS contactName, 789105 AS sourceKey UNION ALL
SELECT 1002 AS primaryKey, 'Jane' AS contactName, 789185 AS sourceKey UNION ALL
SELECT 1003 AS primaryKey, 'Sam' AS contactName, 788181 AS sourceKey UNION ALL
SELECT 1004 AS primaryKey, 'Susan' AS contactName, 681181 AS sourceKey
;
END TRY
BEGIN CATCH
ROLLBACK TRANSACTION sourceData
END CATCH
/* COMMIT below is purposely commented out in order to Test */
--COMMIT TRANSACTION sourceData
GO
BEGIN TRAN mainProcess
DECLARE @insertedRecords INT = 0,
        @CustomID INT = 2,
        @Note VARCHAR(200) = 'Test Temp DB Record Population via OUTPUT Clause',
        @ConvDate DATE = GETDATE()
;
BEGIN TRY
MERGE INTO #ProductionDestTable AS dest
USING
(
SELECT src.primaryKey, src.contactName, src.sourceKey FROM #ProductionSrcTable src
) AS src ON src.primaryKey = dest.primaryKey AND src.sourceKey = dest.secondaryKey
WHEN NOT MATCHED BY TARGET
THEN
INSERT --INTO ProductionDestTable
( primaryKey, contactName, secondaryKey )
VALUES
( src.primaryKey, src.contactName, src.sourceKey )
/* Insert Output in Log Table */
OUTPUT $action, inserted.primaryKey, src.sourceKey, @CustomID, @Note, @ConvDate INTO #LogTable;
; /* END MERGE */
/* Store the number of inserted Records into the insertedRecords variable */
SET @insertedRecords = @@ROWCOUNT;
END TRY
BEGIN CATCH
ROLLBACK TRANSACTION mainProcess
END CATCH
;
--ROLLBACK TRANSACTION mainProcess
COMMIT TRANSACTION mainProcess
ROLLBACK TRANSACTION sourceData
PRINT 'Records Inserted: ' + CAST(@insertedRecords AS VARCHAR);
/* END */
--SELECT @@TRANCOUNT
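For reference, the behavior above is by design: SQL Server has no true nested transactions. BEGIN TRAN mainProcess inside the open sourceData transaction only increments @@TRANCOUNT, COMMIT TRANSACTION mainProcess merely decrements it, and any ROLLBACK undoes the entire chain. The usual workaround for autonomous-style logging is a table variable, because table variables are not affected by ROLLBACK. A minimal sketch (not part of the original script; it reuses #LogTable from above):

/* Table variables do not participate in transactions, so rows captured in
   @LogBuffer survive the ROLLBACK and can be persisted afterwards. */
DECLARE @LogBuffer TABLE
( Action VARCHAR(50),
  primaryID INT,
  Note VARCHAR(200)
);

BEGIN TRAN mainProcess
    INSERT INTO @LogBuffer ( Action, primaryID, Note )
    VALUES ( 'INSERT', 1001, 'logged while the transaction is still open' );
ROLLBACK TRAN mainProcess   -- undoes DML against real and temp tables only

/* @LogBuffer still holds its row; copy it to the durable log table now */
INSERT INTO #LogTable ( Action, primaryID, Note )
SELECT Action, primaryID, Note FROM @LogBuffer;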

Related

How to read updated data in a stored procedure called multiple times simultaneously

There are 2 tables:
Wallets;
Transactions.
There is a stored procedure that handles (I think as one ACID operation):
updating the Wallets table
inserting one row into the Transactions table
every time it is called.
The issue occurs when there are many calls to the SP at the same time; in fact the value of PreviousBalance is not correct (sequentially wrong), because the SP reads the old value while another call is still running.
To understand better, look at the following screenshot.
There are 3 transactions with the same DT (IDs 1289, 1288, 1287); in all of those PreviousBalance is equal, but it is not correct, because:
Trx ID 1288 should be 180,78, the Balance of the previous row;
Trx ID 1289 should be 168,07 = 180,78 - 12,08.
I think the issue is in the SET of the @OLDBalance variable; the 3 threads read the same value at the same time, so when the SP gets to the INSERT it loads the same value of PreviousBalance.
How can I read @OLDBalance correctly after the commit of one operation?
I tried setting several isolation levels in the SP; the result was the same, and sometimes it failed with a deadlock.
I have the following stored procedure:
Stored Procedure
ALTER PROCEDURE [dbo].[upsMovimenta_internal]
    @AccountID int,
    @Amount money,
    @TypeTransactionID int,
    @ProductID int,
    @notes nvarchar(max)
AS
BEGIN
    SET NOCOUNT ON;
    DECLARE @OLDBalance MONEY;
    DECLARE @PreviousBalance MONEY;
    DECLARE @CurrentBalance MONEY;
    DECLARE @Molt float;

    BEGIN TRANSACTION;

    IF NOT EXISTS (SELECT * FROM Accounts WHERE AccountID = @AccountID)
    BEGIN
        RAISERROR(N'Account not found ', 10, 1, N'number', 3);
        RETURN (1)
    END

    SELECT @Molt = Moltiplicatore
    FROM TypeTransactions
    WHERE TypeTransactionID = @TypeTransactionID;

    IF (@Molt IS NULL)
    BEGIN
        RAISERROR(N'Error transaction', 10, 1, N'number', 3);
        RETURN (1)
    END

    SET @Amount = @Amount * @Molt;

    --SELECT * FROM Wallets
    SELECT TOP 1 @OLDBalance = TotalAmount
    FROM Wallets
    WHERE AccountID = @AccountID;

    SET @CurrentBalance = @OLDBalance + @Amount;

    IF (@ProductID = 1)
    BEGIN
        UPDATE Wallets
        SET TotalAmount += @Amount,
            Cash += @Amount
        WHERE AccountID = @AccountID;
    END

    IF (@ProductID = 2)
    BEGIN
        UPDATE Wallets
        SET TotalAmount += @Amount,
            Fun += @Amount
        WHERE AccountID = @AccountID;
    END

    INSERT INTO Transactions
        ( AccountID, ProductID, DT, TypeTransactionID, Amout, Balance, PreviousBalance, Notes )
    VALUES
        ( @AccountID, @ProductID, GETDATE(), @TypeTransactionID, @Amount, @CurrentBalance, @OLDBalance, @notes );

    COMMIT TRANSACTION;
    RETURN (0)
END
Thank you so much guys
Generally, one way of managing locks on records is to apply a dummy update on the rows you want to work on, right after starting the transaction.
In this case SQL Server guarantees that those rows will be locked and no other transaction can access them. So you can change your design to something like this:
begin tran
update myTable
set Field1 = Field1
where someKeyField = 212
-- do the same for other tables that you want to protect against change
-- at this moment all working rows will be locked, other procedure calls will be on hold
-- do your main operations here
commit tran
The issue with this is that other calls to the proc will wait, which may degrade performance or even time out if traffic is high and the operation in this proc is lengthy.
If you are working in a high-transaction environment, you need to change your design.
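A lighter variant of the same idea (a sketch, not from the answer above) is to take the update lock directly when reading the balance inside the procedure from the question, so the dummy write is unnecessary:

-- Sketch: UPDLOCK makes the read take (and HOLDLOCK keep, for the duration
-- of the transaction) an update lock on the wallet row, so concurrent calls
-- queue here instead of reading a stale balance.
BEGIN TRAN;
SELECT TOP 1 @OLDBalance = TotalAmount
FROM Wallets WITH (UPDLOCK, HOLDLOCK)
WHERE AccountID = @AccountID;
-- ... main operations ...
COMMIT TRAN;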
Update: Design Suggestion
I don't get why you have PreviousBalance and Balance in your Transactions table (it is against the design rules, although you can override a rule in special cases).
Probably you have them to speed up your calculations or to make your queries simpler, but it is not good practice in an OLTP database.
The rules say you keep the Amount column and calculate PreviousBalance and Balance somewhere else.
You should drop PreviousBalance but keep the Balance column, and every time you insert a transaction, you update (increase/decrease) the Balance column. Plus you need to initialize the Balance column at the first transaction.
This is what I can think of. If I knew your whole system, I would be able to have better ideas though.
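For completeness, here is a sketch of how the read-then-write race itself can be removed (not from the answer above; table and column names are taken from the question): let the UPDATE both change and return the balance via the OUTPUT clause, so the old value is read under the same row lock the update takes.

-- Sketch: capture the old and new balance in the statement that changes it;
-- concurrent callers serialize on the row lock, so stale reads are impossible.
DECLARE @Bal TABLE ( OldBalance MONEY, NewBalance MONEY );

UPDATE Wallets
SET TotalAmount += @Amount
OUTPUT deleted.TotalAmount, inserted.TotalAmount INTO @Bal (OldBalance, NewBalance)
WHERE AccountID = @AccountID;

INSERT INTO Transactions ( AccountID, DT, Amout, Balance, PreviousBalance )  -- 'Amout' as spelled in the question's schema
SELECT @AccountID, GETDATE(), @Amount, NewBalance, OldBalance
FROM @Bal;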

Finding accounts greater than specified transaction amount

I have a bank db with two tables accountmaster and transactionmaster.
Accountmaster has columns:
accid(pk)
accname
bal
branch
Transactionmaster columns:
Tnumber(pk)
dot
txnAmt
transactiontype
accid(fk)
branch(fk)
I want to achieve the below without using INSTEAD OF triggers.
whenever a transaction is made by the accountholder (transaction type like deposit, withdraw) it should be reflected in the balance of the accountmaster table.
whenever an accountholder makes a transaction > 50000 (withdraw or deposit), that transaction's details are to be inserted into a new table 'hightransaction' and that particular transaction removed from the transactionmaster table.
I tried something like this, but in the result only column names are displayed and no values.
First I copied the structure of transactionmaster into a new table hightransaction:
select *
into hightransaction
from transactionmaster
where 1 = 2
Then I created a trigger:
create trigger [dbo].[transaction2]
on transactionmaster
for insert
as
    declare @transtype nvarchar(10);

    select @transtype = [TXNTYPE]
    from inserted

    if (select txnamt from inserted) > 75000
    begin
        insert into [dbo].[hightransactionmaster3]
        select dot, txntype, chqnum, chqdate, txnamt,
               acid, brid, userid
        from inserted
    end
    else
    begin
        insert into [dbo].[TRANSACTIONMASTER]
        select dot, txntype, chqnum, chqdate, txnamt,
               acid, brid, userid
        from inserted
    end
and I tried to execute
select * from hightransaction
The output is only column names and no values.
I am thinking of a stored procedure like this.
CREATE PROCEDURE [dbo].[transaction]
    @transactionAmt int
    /*
    ADD OTHER PARAMETERS HERE
    */
AS
BEGIN
    SET NOCOUNT ON;
    IF (@transactionAmt > 50000)
    BEGIN
        /* INSERT STATEMENT FOR HIGHTRANSACTION TABLE */
    END
    ELSE
    BEGIN
        /* INSERT STATEMENT FOR TRANSACTIONMASTER TABLE */
    END
END
GO
This is provided that you have control over the application and can have it pass the parameters into a stored procedure.
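If you do stay with a trigger instead, note that the original one assumes inserted holds exactly one row, which SQL Server does not guarantee. A set-based sketch (column names taken from the question; the threshold and the signed-amount assumption are illustrative):

-- Sketch: set-based AFTER INSERT trigger that handles multi-row inserts.
CREATE TRIGGER dbo.trg_HighTransaction
ON dbo.transactionmaster
AFTER INSERT
AS
BEGIN
    SET NOCOUNT ON;

    -- requirement 1: reflect each transaction in the account balance
    -- (assumes txnAmt is already signed for deposits vs. withdrawals)
    UPDATE am
    SET am.bal += i.total
    FROM dbo.accountmaster AS am
    JOIN ( SELECT accid, SUM(txnAmt) AS total
           FROM inserted
           GROUP BY accid ) AS i ON i.accid = am.accid;

    -- requirement 2: move transactions over the threshold to hightransaction
    INSERT INTO dbo.hightransaction (Tnumber, dot, txnAmt, transactiontype, accid, branch)
    SELECT Tnumber, dot, txnAmt, transactiontype, accid, branch
    FROM inserted
    WHERE txnAmt > 50000;

    DELETE tm
    FROM dbo.transactionmaster AS tm
    JOIN inserted AS i ON i.Tnumber = tm.Tnumber
    WHERE i.txnAmt > 50000;
END;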

SQL Server : Cannot roll back ...No transaction or savepoint of that name was found

We have a Perl script that bcp's data for various entities to stage tables, and then invokes a stored procedure via DBI (AutoCommit=0) to move data from stage to core tables for these entities.
The stored procedure pseudo-code is as below:
-- GET DETAILS from stage into temporary tables
SELECT * INTO #MAIN_TBL FROM <MAIN_STG_TBL>
SELECT * INTO #supp_1   FROM <SUPP_STG_1>
SELECT * INTO #supp_2   FROM <SUPP_STG_2>
.....
SELECT * INTO #supp_10  FROM <SUPP_STG_10>
RAISERROR (<INFO_MSG>, 10, 1) WITH NOWAIT

-- for each entity in #MAIN_TBL
-- move data from stage to core
WHILE_LOOP ( EACH <ENTRY> in #MAIN_TBL )
    BEGIN TRY
        BEGIN TRAN STG_CORE
        INSERT INTO <MAIN_CORE>    SELECT * FROM #MAIN_TBL WHERE ENTITY=<ENTRY>
        INSERT INTO <SUPP_CORE_1>  SELECT * FROM #supp_1   WHERE ENTITY=<ENTRY>
        INSERT INTO <SUPP_CORE_2>  SELECT * FROM #supp_2   WHERE ENTITY=<ENTRY>
        .....
        INSERT INTO <SUPP_CORE_10> SELECT * FROM #supp_10  WHERE ENTITY=<ENTRY>
        COMMIT TRAN STG_CORE;
    END TRY
    BEGIN CATCH
        ROLLBACK TRAN STG_CORE;
    END CATCH
    RAISERROR (<INFO_MSG>, 10, 1) WITH NOWAIT
END LOOP
RAISERROR (<INFO_MSG>, 10, 1) WITH NOWAIT
The stored procedure itself is invoked on an AutoCommit=0 connection and is immediately followed by DBI->commit.
ISSUE
For the failed entities (roughly 200 of 1000, mostly due to primary key violations in some of the SUPP tables) we see the message: STAGE_TO_CORE_SP text=Cannot roll back STG_CORE. No transaction or savepoint of that name was found
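The likely cause: with AutoCommit=0 the driver has already opened a transaction, so BEGIN TRAN STG_CORE is a nested transaction, and ROLLBACK TRANSACTION with a name only accepts the name of the outermost transaction or of a savepoint. A sketch of the per-entity loop body using a savepoint instead (names are illustrative):

-- Sketch: one savepoint per entity, so a failed entity can be undone
-- without rolling back (or naming) the driver's outer transaction.
BEGIN TRY
    SAVE TRANSACTION EntitySP;   -- requires an open transaction (@@TRANCOUNT >= 1)

    INSERT INTO MAIN_CORE   SELECT * FROM #MAIN_TBL WHERE ENTITY = @entry;
    INSERT INTO SUPP_CORE_1 SELECT * FROM #supp_1   WHERE ENTITY = @entry;
    -- ... remaining SUPP tables ...
END TRY
BEGIN CATCH
    IF XACT_STATE() = 1            -- still committable: roll back to the savepoint only
        ROLLBACK TRANSACTION EntitySP;
END CATCH;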

Identity key counter increments by one although it is in TRY/CATCH and the transaction is rolled back? SSMS 2008

The identity counter increments by one although the insert is inside TRY/CATCH and the transaction is rolled back (SSMS 2008). Is there any way I can stop the +1, or roll it back too?
In order to understand why this happens, let's execute the sample code below first:
USE tempdb
CREATE TABLE dbo.Sales
(ID INT IDENTITY(1,1), Address VARCHAR(200))
GO
BEGIN TRANSACTION
INSERT DBO.Sales
( Address )
VALUES ( 'Dwarka, Delhi' );
ROLLBACK TRANSACTION
The execution plan for the above query shows that the second-to-last operator from the right, a Compute Scalar, computes [Expr1003]=getidentity((629577281),(2),NULL), which is the IDENTITY value for the ID column. This clearly indicates that IDENTITY values are fetched and incremented prior to insertion (the INSERT operator). So it is by nature that the already-created IDENTITY value remains even if the transaction rolls back at a later stage.
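You can observe the same thing without the execution plan (a sketch against the dbo.Sales table above; it relies on the rolled-back insert above having already consumed ID 1):

-- Sketch: the identity counter moves even though the insert is rolled back.
SELECT IDENT_CURRENT('dbo.Sales') AS BeforeInsert;   -- 1
BEGIN TRANSACTION
INSERT dbo.Sales ( Address ) VALUES ( 'Dwarka, Delhi' );
ROLLBACK TRANSACTION
SELECT IDENT_CURRENT('dbo.Sales') AS AfterRollback;  -- 2, despite the rollback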
Now, in order to reseed the IDENTITY value so that the next value is the maximum identity value present in the table + 1, you need sysadmin permission to execute the DBCC command below:
DBCC CHECKIDENT
(
table_name
[, { NORESEED | { RESEED [, new_reseed_value ] } } ]
)
[ WITH NO_INFOMSGS ]
So the final query should include the piece of code below prior to the rollback statement:
-- Code to check the max ID value, and verify it against the IDENTITY seed
DECLARE @MaxValue INT = (SELECT ISNULL(MAX(ID),1) FROM dbo.Sales)
IF @MaxValue IS NOT NULL AND @MaxValue <> IDENT_CURRENT('dbo.Sales')
DBCC CHECKIDENT ( 'dbo.Sales', RESEED, @MaxValue )
--ROLLBACK TRANSACTION
So it is recommended to leave identity values to SQL Server.
You are right, and the following code inserts a record with [Col01] equal to 2:
CREATE TABLE [dbo].[DataSource]
(
[Col01] SMALLINT IDENTITY(1,1)
,[Col02] TINYINT
);
GO
BEGIN TRY
BEGIN TRANSACTION;
INSERT INTO [dbo].[DataSource] ([Col02])
VALUES (1);
SELECT 1/0
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0
BEGIN
ROLLBACK TRANSACTION
END;
END CATCH;
GO
INSERT INTO [dbo].[DataSource] ([Col02])
VALUES (1);
SELECT *
FROM [dbo].[DataSource]
This is by design, as you can see in the documentation:
Consecutive values after server restart or other failures – SQL Server might cache identity values for performance reasons and some of the assigned values can be lost during a database failure or server restart. This can result in gaps in the identity value upon insert. If gaps are not acceptable then the application should use its own mechanism to generate key values. Using a sequence generator with the NOCACHE option can limit the gaps to transactions that are never committed.
I tried using a NOCACHE sequence, but it does not work on SQL Server 2012:
CREATE TABLE [dbo].[DataSource]
(
[Col01] SMALLINT
,[Col02] TINYINT
);
CREATE SEQUENCE [dbo].[MyIndentyty]
START WITH 1
INCREMENT BY 1
NO CACHE;
GO
BEGIN TRY
BEGIN TRANSACTION;
INSERT INTO [dbo].[DataSource] ([Col01], [Col02])
SELECT NEXT VALUE FOR [dbo].[MyIndentyty], 1
SELECT 1/0
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0
BEGIN
ROLLBACK TRANSACTION
END;
END CATCH;
GO
INSERT INTO [dbo].[DataSource] ([Col01], [Col02])
SELECT NEXT VALUE FOR [dbo].[MyIndentyty], 1
SELECT *
FROM [dbo].[DataSource]
DROP TABLE [dbo].[DataSource];
DROP SEQUENCE [dbo].[MyIndentyty];
You can use MAX to solve this:
CREATE TABLE [dbo].[DataSource]
(
[Col01] SMALLINT
,[Col02] TINYINT
);
BEGIN TRY
BEGIN TRANSACTION;
DECLARE @Value SMALLINT = (SELECT MAX([Col01]) FROM [dbo].[DataSource]);
INSERT INTO [dbo].[DataSource] ([Col01], [Col02])
SELECT @Value, 1
SELECT 1/0
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0
BEGIN
ROLLBACK TRANSACTION
END;
END CATCH;
GO
DECLARE @Value SMALLINT = ISNULL((SELECT MAX([Col01]) FROM [dbo].[DataSource]), 1);
INSERT INTO [dbo].[DataSource] ([Col01], [Col02])
SELECT @Value, 1
SELECT *
FROM [dbo].[DataSource]
DROP TABLE [dbo].[DataSource];
But you must pay attention to your isolation level, because of potential concurrency issues.
If you want to insert many rows at the same time, do the following (see the sketch after this list):
get the current max value
create a table to store the rows that are going to be inserted, generate a ranking for them (you can use an identity column or a ranking function), and add the max value to it
insert the rows
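A minimal sketch of those steps (table and values as in the examples above; the ordering is illustrative):

-- Sketch: number the incoming rows and offset them by the current max,
-- so many rows receive distinct Col01 values in a single statement.
DECLARE @MaxValue SMALLINT = ISNULL((SELECT MAX([Col01]) FROM [dbo].[DataSource]), 0);

INSERT INTO [dbo].[DataSource] ([Col01], [Col02])
SELECT @MaxValue + ROW_NUMBER() OVER (ORDER BY (SELECT NULL)),
       v.[Col02]
FROM (VALUES (1), (2), (3)) AS v([Col02]);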

how to create an agent job depending on time

I am creating an agent job using SQL Server. In my database there are 2 tables.
The columns in the first table are:
Idproduct, number
The columns in the second table are:
Idproduct, start (datetime), duration (time)
I need to increment the field number of a product when its (start + duration) <= getdate(), and then I need to delete that record.
How can I do this?
Create table product(
Idproduct int primary key,
Number int default 0)
Create table production(
Idproduct int primary key,
Start datetime not null,
Times time not null)
One method is to use the OUTPUT clause of a DELETE statement to insert the produced products into a table variable, and then use the table variable to increment the count. The example below uses SQL 2012 and above features, but you can retrofit the error handling for earlier versions if needed.
SET XACT_ABORT ON;
DECLARE @ProducedProducts TABLE(
Idproduct int
);
BEGIN TRY
BEGIN TRAN;
--delete produced products
DELETE FROM dbo.production
OUTPUT deleted.Idproduct INTO @ProducedProducts
WHERE
DATEADD(millisecond, DATEDIFF(millisecond, '', Times), Start) <= GETDATE();
--increment produced products count
UPDATE dbo.product
SET Number += 1
WHERE Idproduct IN(
SELECT pp.Idproduct
FROM @ProducedProducts AS pp
);
COMMIT;
END TRY
BEGIN CATCH
THROW;
END CATCH;
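To run this on a schedule, wrap the batch in a stored procedure and register it with SQL Server Agent. A sketch using the msdb procedures (job, schedule, database, and procedure names are assumptions; this schedule fires every minute):

USE msdb;
GO
-- Sketch: create the job, a single T-SQL step, and a minute-by-minute schedule.
EXEC dbo.sp_add_job         @job_name = N'ProcessProducedProducts';
EXEC dbo.sp_add_jobstep     @job_name = N'ProcessProducedProducts',
                            @step_name = N'Increment and delete',
                            @subsystem = N'TSQL',
                            @database_name = N'MyDatabase',   -- assumed database name
                            @command = N'EXEC dbo.usp_ProcessProducedProducts;';  -- assumed proc wrapping the batch above
EXEC dbo.sp_add_jobschedule @job_name = N'ProcessProducedProducts',
                            @name = N'EveryMinute',
                            @freq_type = 4,             -- daily
                            @freq_interval = 1,
                            @freq_subday_type = 4,      -- every N minutes
                            @freq_subday_interval = 1;  -- N = 1
EXEC dbo.sp_add_jobserver   @job_name = N'ProcessProducedProducts';  -- target the local server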
