Stored procedure inserts too many rows when inserting into multiple tables - sql-server

I expect the procedure below to insert one row into the orders table, grab its PK, insert that PK into the orderlines table with as many lines as there are in the DataGridView on the form, then update the stock with the quantity, and finally insert one row into the payments table. Unfortunately it inserts as many rows into all three tables as there are rows in the DataGridView. Is there a way to achieve my goal, preferably without breaking it down into more procedures?
CREATE PROCEDURE spNewSale
    @customer INT,      -- passed from a comboBox
    @staff VARCHAR(12), -- the user logged in
    @product INT,       -- passed from a comboBox
    @quantity INT       -- passed from a textBox
AS
BEGIN
    SET NOCOUNT ON;

    INSERT INTO orders (order_cust_id, order_staff_id, order_date) -- order_id (PK) = identity autoincremented
    VALUES (@customer,
            (SELECT logons.logon_st_id FROM logons
             WHERE logons.logon_name = @staff), CURRENT_TIMESTAMP);

    DECLARE @ordernbr INT = SCOPE_IDENTITY();

    INSERT INTO orderlines (ol_order_id, ol_pro_id, ol_qty, ol_value) -- ol_id (PK) = identity autoincremented
    VALUES (@ordernbr, @product, @quantity,
            @quantity * (SELECT products.pro_price
                         FROM products WHERE pro_id = @product));

    UPDATE products
    SET pro_stock = (pro_stock - @quantity)
    WHERE pro_id = @product;

    INSERT INTO payments (paym_order_id, paym_amount, paym_taken_by, paym_date) -- paym_id (PK) = identity autoincremented
    VALUES (@ordernbr,
            (SELECT SUM(orderlines.ol_value) AS paym_amount
             FROM orderlines WHERE orderlines.ol_order_id = @ordernbr),
            (SELECT logons.logon_st_id FROM logons
             WHERE logons.logon_name = @staff), CURRENT_TIMESTAMP)
END
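
If the form calls spNewSale once for every DataGridView row, each call opens its own order and payment, which would explain the symptom. One way to keep it to a single procedure is to pass all grid lines in one call as a table-valued parameter. A minimal sketch of that idea (SaleLineType, spNewSaleTvp and the type's columns are hypothetical names, and each product is assumed to appear at most once in the grid):

CREATE TYPE SaleLineType AS TABLE (pro_id INT, qty INT);
GO
CREATE PROCEDURE spNewSaleTvp
    @customer INT,
    @staff VARCHAR(12),
    @lines SaleLineType READONLY -- all rows from the DataGridView
AS
BEGIN
    SET NOCOUNT ON;

    -- one order for the whole sale
    INSERT INTO orders (order_cust_id, order_staff_id, order_date)
    VALUES (@customer,
            (SELECT logon_st_id FROM logons WHERE logon_name = @staff),
            CURRENT_TIMESTAMP);

    DECLARE @ordernbr INT = SCOPE_IDENTITY();

    -- one orderline per grid row
    INSERT INTO orderlines (ol_order_id, ol_pro_id, ol_qty, ol_value)
    SELECT @ordernbr, l.pro_id, l.qty, l.qty * p.pro_price
    FROM @lines l
    JOIN products p ON p.pro_id = l.pro_id;

    -- adjust stock for every product in the sale
    UPDATE p
    SET pro_stock = pro_stock - l.qty
    FROM products p
    JOIN @lines l ON l.pro_id = p.pro_id;

    -- one payment for the whole order
    INSERT INTO payments (paym_order_id, paym_amount, paym_taken_by, paym_date)
    VALUES (@ordernbr,
            (SELECT SUM(ol_value) FROM orderlines WHERE ol_order_id = @ordernbr),
            (SELECT logon_st_id FROM logons WHERE logon_name = @staff),
            CURRENT_TIMESTAMP);
END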

Random Primary Key missing in SQL Server selection

I am facing a problem when I query a master table (~700 million records, highly transactional) to look for newly inserted records. My aim is to get all the newly created IDs into the #IDS temp table and dump them into another child table, but random IDs are missing from the child table.
Setup:
We have a primary and secondary server (SQL Server 2016) and they are in sync mode.
Tables:
CREATE TABLE tblMaster
(
    ID BIGINT IDENTITY(1,1) NOT NULL,
    EmployeeID INT NOT NULL
)

CREATE TABLE tblChild
(
    ChildID BIGINT IDENTITY(1,1),
    ID BIGINT NOT NULL,
    TransactionDate DATETIME NOT NULL
)
tblChild.ID references tblMaster.ID.
Stored procedure:
DECLARE @MaxID BIGINT

SELECT @MaxID = MAX(ID) FROM tblChild WITH(NOLOCK)
SET @MaxID = ISNULL(@MaxID, 0)

DROP TABLE IF EXISTS #IDS

SELECT ID
INTO #IDS
FROM tblMaster WITH(NOLOCK)
WHERE ID > @MaxID

--25k RECORDS BATCH INSERT INTO tblChild - MAINLY TAKE CARE NEWLY inserted records
STARTIDS:
IF EXISTS (SELECT TOP 1 * FROM #IDS)
BEGIN
    DROP TABLE IF EXISTS #TOPIDS

    SELECT TOP 25000 ID INTO #TOPIDS
    FROM #IDS
    ORDER BY ID ASC

    INSERT INTO tblChild (ID, CreatedBy, CreatedDate)
    SELECT ID, SYSTEM_USER, GETDATE()
    FROM #TOPIDS

    DELETE AA
    FROM #IDS AA
    INNER JOIN #TOPIDS BB ON AA.ID = BB.ID

    GOTO STARTIDS
END
Please help me figure out where it's going wrong.
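
One thing worth ruling out (a hedged note, not a definitive diagnosis): NOLOCK/READ UNCOMMITTED scans can skip rows or read uncommitted data while pages split, so both the MAX(ID) watermark and the scan of tblMaster can miss IDs. A minimal sketch of the same batching loop with the NOLOCK hints removed and the GOTO replaced by WHILE, otherwise using the question's tables and columns as-is:

DECLARE @MaxID BIGINT;

-- read the high-water mark under the default (committed) isolation level
SELECT @MaxID = ISNULL(MAX(ID), 0) FROM tblChild;

DROP TABLE IF EXISTS #IDS;

SELECT ID
INTO #IDS
FROM tblMaster
WHERE ID > @MaxID;

-- copy across in 25k batches until the staging table is empty
WHILE EXISTS (SELECT 1 FROM #IDS)
BEGIN
    DROP TABLE IF EXISTS #TOPIDS;

    SELECT TOP (25000) ID
    INTO #TOPIDS
    FROM #IDS
    ORDER BY ID ASC;

    INSERT INTO tblChild (ID, CreatedBy, CreatedDate)
    SELECT ID, SYSTEM_USER, GETDATE()
    FROM #TOPIDS;

    DELETE AA
    FROM #IDS AA
    INNER JOIN #TOPIDS BB ON AA.ID = BB.ID;
END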

SQL Server set based processing for orders and details

I have been trying to get acclimated to set based processing with SQL Server. Below is a simplified version of cursor processing for this task. It involves creating an order from items in a shopping cart. The order is created, line items are added to the order details table, the total is accumulated and eventually updated on the order table. Can anyone suggest how to do this with a set based approach instead of a cursor?
One other question: in most cases the cursor will process at most 10 or 12 line items at a time. Is that small enough that I don't really need to consider the set-based approach?
declare getCart2 cursor for
    select MemberID, ProductID, Quantity, Price
    from Carts
    where MemberID = @MemberID
open getCart2
fetch next from getCart2 into @MemberID, @ProductID, @Quantity, @Price

Insert into Orders
    (MemberID, TotalAmount)
Values
    (@MemberID, 0.00)
set @OrderID = @@Identity

while @@FETCH_STATUS = 0 Begin
    Insert into OrderDetails
        (OrderID, ProductID, Quantity)
    Values
        (@OrderID, @ProductID, @Quantity)
    set @TotalAmount = @TotalAmount + (@Quantity * @Price)
    set @PrevMemberID = @MemberID
    fetch next from getCart2 into @MemberID, @ProductID, @Quantity, @Price
End
close getCart2
deallocate getCart2

Update Orders
Set TotalAmount = @TotalAmount
Where OrderID = @OrderID
Thanks for your help.
Here is an approach:
In this case I am creating a table variable that will store the inserted order IDs.
Then it performs the inserts into the Orders table and, after that, into OrderDetails.
Finally, it computes the TotalAmount and updates it on the Orders table.
Although neither your code nor mine includes it, I recommend running this code inside a transaction.
Hope it helps you improve your performance.
USE [tempdb];
GO
SET NOCOUNT ON;
IF OBJECT_ID(N'dbo.Carts', N'U') IS NOT NULL DROP TABLE [dbo].[Carts];
IF OBJECT_ID(N'dbo.Orders', N'U') IS NOT NULL DROP TABLE [dbo].[Orders];
IF OBJECT_ID(N'dbo.OrderDetails', N'U') IS NOT NULL DROP TABLE [dbo].[OrderDetails];
GO
-- Creates the tables like you have
CREATE TABLE [dbo].[Carts] (MemberID INT, ProductID INT, Quantity INT, Price DECIMAL(10, 2));
CREATE TABLE [dbo].[Orders] (OrderID INT IDENTITY(1, 1), MemberID INT, TotalAmount DECIMAL(10, 2));
CREATE TABLE [dbo].[OrderDetails] (OrderID INT, ProductID INT, Quantity INT);
-- Inserts dummy data
INSERT INTO [dbo].[Carts] VALUES (1001, 80, 5, 25.00);
INSERT INTO [dbo].[Carts] VALUES (1002, 120, 2, 12.90);
INSERT INTO [dbo].[Carts] VALUES (1010, 70, 3, 12.00);
INSERT INTO [dbo].[Carts] VALUES (1034, 176, 5, 45.00);
-- Table variable that stores the inserted Order ID's
DECLARE @OrdersToProcess TABLE (OrderID INT, MemberID INT);
-- Inserts all Orders
INSERT INTO Orders (MemberID, TotalAmount)
OUTPUT inserted.OrderID, inserted.MemberID INTO @OrdersToProcess
SELECT MemberID, 0
FROM [dbo].[Carts];
-- Inserts order details
INSERT INTO OrderDetails (OrderID, ProductID, Quantity)
SELECT OrderID, ProductID, Quantity
FROM [dbo].[Carts] C
INNER JOIN @OrdersToProcess O ON C.MemberID = O.MemberID;
-- Updates order totals
UPDATE [dbo].[Orders]
SET TotalAmount = T.Total
FROM
(
    SELECT OrderID, SUM(Quantity * Price) AS [Total]
    FROM [dbo].[Carts] C
    INNER JOIN @OrdersToProcess O ON C.MemberID = O.MemberID
    GROUP BY OrderID
) T
WHERE [dbo].[Orders].OrderID = T.OrderID;
SELECT * FROM [dbo].[Orders];
SELECT * FROM [dbo].[OrderDetails];
Results:
As I understand your problem, this stored procedure should be called when a particular member presses the check-out button, so it should create a single order with all the items in that member's cart.
You can use something like this:
INSERT INTO Orders (MemberID, TotalAmount)
VALUES (@MemberID, 0)

SET @OrderID = SCOPE_IDENTITY()

INSERT INTO OrderDetails (OrderID, ProductID, Quantity)
SELECT @OrderID, ProductID, Quantity
FROM [dbo].[Carts] C
WHERE C.MemberID = @MemberID

UPDATE dbo.Orders SET TotalAmount = (
    SELECT SUM(c.Quantity * c.Price)
    FROM dbo.Carts c
    WHERE c.MemberID = @MemberID
) WHERE OrderID = @OrderID
It's true that this reads the Carts table twice, but with a proper index (on the MemberID column) that should be fast enough.
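
As the first answer notes, this should run inside a transaction so a failure between the three statements cannot leave a half-created order. A minimal sketch wrapping the single-member version above in a transaction with basic error handling (assuming @MemberID and @OrderID are declared by the surrounding procedure):

BEGIN TRY
    BEGIN TRANSACTION;

    INSERT INTO dbo.Orders (MemberID, TotalAmount)
    VALUES (@MemberID, 0);

    SET @OrderID = SCOPE_IDENTITY();

    INSERT INTO dbo.OrderDetails (OrderID, ProductID, Quantity)
    SELECT @OrderID, ProductID, Quantity
    FROM dbo.Carts C
    WHERE C.MemberID = @MemberID;

    UPDATE dbo.Orders
    SET TotalAmount = (SELECT SUM(c.Quantity * c.Price)
                       FROM dbo.Carts c
                       WHERE c.MemberID = @MemberID)
    WHERE OrderID = @OrderID;

    COMMIT TRANSACTION;
END TRY
BEGIN CATCH
    -- undo any partial work, then surface the original error to the caller
    IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION;
    THROW;
END CATCH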

How to make a Trigger on Master-Detail

I have the following scenario:
CREATE TABLE dbo.Orders
(
OrderID int IDENTITY (1,1) NOT NULL
, OrderVersion int DEFAULT(1)
, Customer varchar(30)
, ScheduleDate date
, PaymentOption int
);
CREATE TABLE dbo.OrdersItems
(
OrderItemsID int IDENTITY (1,1) NOT NULL
, OrderID int
, Product varchar(100)
, Qty int
, value decimal(18,2)
);
CREATE TABLE dbo.logOrders
(
OrderID int NOT NULL
, OrderVersion int DEFAULT(1)
, Customer varchar(30)
, ScheduleDate date
, PaymentOption int
);
CREATE TABLE dbo.logOrdersItems
(
OrderItemsID int NOT NULL
, OrderID int
, Product varchar(100)
, Qty int
, value decimal(18,2)
);
-- Insert values into the table.
INSERT INTO dbo.Orders (Customer, ScheduleDate, PaymentOption)
VALUES ('John', '2016-09-01', 1);

INSERT INTO dbo.OrdersItems (OrderId, Product, Qty, Value)
VALUES (1, 'Foo', 20, 35.658),
       (1, 'Bla', 50, 100),
       (1, 'XYZ', 10, 3589);
First Statement
UPDATE Orders SET ScheduleDate = '2016-10-05' WHERE OrderId = 1
Second Statement
Delete From OrdersItems WHERE OrderItemsID = 2
UPDATE OrdersItems set Qty = 5 WHERE OrderItemsID = 1
Third Statement
Update Orders set PaymentOption = 2 WHERE OrderId = 1
Update OrdersItems set Value = 1050 WHERE OrderItemsID = 3
I am trying to figure out how to write a trigger that, after each of the sample statements above, inserts into the log tables the data as it was before the change, and sets OrderVersion to OrderVersion + 1 on the Orders table.
That way the log tables will hold every earlier version of a row, and the live tables the latest one.
Is it possible to write a single trigger that monitors both tables, captures the original data before the UPDATE, DELETE or INSERT statement runs, and inserts it into the log tables?
Here is a sample to explain better what result I want.
This is the initial data in the Orders and OrdersItems tables.
If I update Orders (any column), or update, insert or delete on OrdersItems, I need to insert into the respective log tables the data shown in the image.
With this I'll have the original data in logOrders and logOrdersItems, and the altered data in Orders and OrdersItems.
I hope that explains what I mean.
You will need two triggers. The trigger for the Orders table handles Orders table update/delete. The trigger for the OrdersItems table does the same for OrdersItems. The triggers look like this:
For the Orders table:
CREATE TRIGGER dbo.Orders_trigger
ON dbo.Orders
AFTER DELETE,UPDATE
AS
BEGIN
SET NOCOUNT ON;
INSERT INTO dbo.logOrders
SELECT * FROM DELETED;
INSERT INTO dbo.logOrdersItems
SELECT oi.* FROM OrdersItems oi
WHERE oi.OrderID IN (SELECT OrderId FROM DELETED);
END
GO
For OrdersItems:
CREATE TRIGGER dbo.OrdersItems_trigger
ON dbo.OrdersItems
AFTER DELETE,UPDATE
AS
BEGIN
SET NOCOUNT ON;
--Insert the changed/deleted OrdersItems into the log
INSERT INTO dbo.logOrdersItems
SELECT * FROM DELETED
--Inserts the unchanged sibling OrdersItems records into the log
INSERT INTO dbo.logOrdersItems
SELECT oi.* FROM OrdersItems oi
WHERE oi.OrderId IN (SELECT DISTINCT OrderId FROM DELETED)
AND oi.OrderItemsID NOT IN (SELECT DISTINCT OrderItemsID FROM DELETED);
INSERT INTO dbo.logOrders
SELECT o.* FROM Orders o
WHERE o.OrderID IN (SELECT DISTINCT OrderId FROM DELETED);
END
GO
The Orders Trigger is fairly straightforward. Use the virtual DELETED table to insert the original version of the records into the log. Then join to the child OrdersItems records and insert them into the log as well. The way this is written, it will work even if you update or delete multiple Order records at a time.
The OrdersItems trigger is a bit more complicated. You need to log the pre-change version of the OrdersItems and Orders records. But you also want (I think) to log the unchanged "sibling" OrdersItems records as well so that you have a complete picture of the records.
I know this is just your sample data, but you will want to add some kind of a timestamp to the records in the log tables. Otherwise you just end up with a bunch of duplicate rows and you cannot tell which is which. At the beginning of the trigger you can create a variable to hold the update datetime and then append that to your INSERT statement for the logs.
DECLARE @UpdateDateTime DATETIME;
SET @UpdateDateTime = GETUTCDATE();
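For example (a sketch assuming a LogDateTime DATETIME column has been added to the log tables; the column name is hypothetical), the logOrders insert in the Orders trigger could become:

DECLARE @UpdateDateTime DATETIME = GETUTCDATE();

-- log the pre-change Orders rows together with the time of the change
INSERT INTO dbo.logOrders (OrderID, OrderVersion, Customer, ScheduleDate, PaymentOption, LogDateTime)
SELECT d.OrderID, d.OrderVersion, d.Customer, d.ScheduleDate, d.PaymentOption, @UpdateDateTime
FROM DELETED d;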

How to get Identity of new records INSERTED into table with INSTEAD OF trigger

I am using an INSTEAD OF insert trigger on a table to set an incrementing version number on the row and also copy the row to a 2nd history/audit table.
The rows are inserted to both tables without a problem.
However, I am having trouble returning the new identity from the 1st table back to the user.
Schema
CREATE TABLE Table1
(
id INT IDENTITY(1,1) PRIMARY KEY,
name VARCHAR(250) NOT NULL UNIQUE,
rowVersion INT NOT NULL
)
CREATE TABLE Table1History
(
id INT NOT NULL,
name VARCHAR(250) NOT NULL,
rowVersion INT NOT NULL
)
CREATE TRIGGER TRG_INS_Table1
ON Table1
INSTEAD OF INSERT
AS
DECLARE @OutputTbl TABLE (id INT, name VARCHAR(250))
BEGIN
--make the insert
INSERT INTO Table1 (name, rowVersion)
OUTPUT INSERTED.id, INSERTED.name INTO @OutputTbl(id, name)
SELECT i.name, 1
FROM INSERTED i
--copy into history table
INSERT INTO Table1History (id, name, rowVersion)
SELECT t.ID, i.name, 1
FROM INSERTED i
JOIN @OutputTbl t on i.name = t.name
END
CREATE TRIGGER TRG_UPD_Table1
ON Table1
INSTEAD OF UPDATE
AS
BEGIN
--make the update
UPDATE Table1
SET name = i.name,
rowVersion = (SELECT d.rowVersion + 1 FROM DELETED d WHERE d.id = i.id)
FROM INSERTED i
WHERE Table1.id = i.id
--copy into history table
INSERT INTO Table1History (id, name, rowVersion)
SELECT i.id ,i.name, (SELECT d.rowVersion + 1 FROM DELETED d WHERE d.id = i.id)
FROM INSERTED i
END
Joining on the name column in the insert trigger is not ideal, but it needs to handle multiple inserts at once.
eg INSERT INTO Table1 (name) VALUES('xxx'),('yyy')
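As an aside (not something the question or the answers below propose), one way to avoid the join on name is MERGE, whose OUTPUT clause, unlike a plain INSERT's, can reference columns of the source rows. A sketch of the trigger's insert portion rewritten that way:

DECLARE @OutputTbl TABLE (id INT, name VARCHAR(250));

-- a MERGE whose match predicate is never true behaves like an INSERT,
-- but its OUTPUT clause can pair each new identity with the source row
MERGE Table1 AS t
USING (SELECT name FROM INSERTED) AS s
    ON 1 = 0
WHEN NOT MATCHED THEN
    INSERT (name, rowVersion) VALUES (s.name, 1)
OUTPUT INSERTED.id, s.name INTO @OutputTbl (id, name);

INSERT INTO Table1History (id, name, rowVersion)
SELECT id, name, 1
FROM @OutputTbl;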
Attempted Solutions
When doing an insert, SCOPE_IDENTITY is NULL.
INSERT INTO Table1(name)
VALUES('xxx')
SELECT SCOPE_IDENTITY()
or
INSERT INTO Table1(name)
VALUES('xxx')
RETURN SCOPE_IDENTITY()
I've also tried using OUTPUT - which returns 0:
DECLARE @IdentityOutput TABLE (id INT)

INSERT INTO Table1(name)
OUTPUT INSERTED.id INTO @IdentityOutput
VALUES('xxx')

SELECT id FROM @IdentityOutput
The rows are inserted fine and have IDs, but I cannot access them unless I use the below - which seems hacky:
INSERT INTO Table1(name)
VALUES('xxx')
SELECT id from Table1 WHERE name = 'xxx'
What is the proper way to get the new ID??
Solution
Impossible! You can't reliably return the identity when doing an INSERT on a table that has an INSTEAD OF trigger. Sidux's answer below is a good workaround for my situation (replace the INSTEAD OF insert trigger with an AFTER trigger plus DEFAULT column values).
CREATE TABLE Table1
(
id INT IDENTITY(1,1) PRIMARY KEY,
name VARCHAR(250) NOT NULL UNIQUE,
rowVersion INT NOT NULL
)
GO
CREATE TABLE Table1History
(
id INT NOT NULL,
name VARCHAR(250) NOT NULL,
rowVersion INT NOT NULL
)
GO
CREATE TRIGGER TRG_INS_Table1
ON Table1
INSTEAD OF INSERT
AS
DECLARE #OutputTbl TABLE (id INT, name VARCHAR(250))
BEGIN
--make the insert
INSERT INTO Table1 (name, rowVersion)
SELECT i.name, 1
FROM INSERTED i
END
GO
CREATE TRIGGER TRG_UPD_Table1
ON Table1
INSTEAD OF UPDATE
AS
BEGIN
--make the update
UPDATE Table1
SET name = i.name,
rowVersion = (SELECT d.rowVersion + 1 FROM DELETED d WHERE d.id = i.id)
FROM INSERTED i
WHERE Table1.id = i.id
END
GO
CREATE TRIGGER TRG_AFT_INS_Table1
ON Table1
AFTER INSERT, UPDATE
AS
BEGIN
INSERT INTO Table1History (id, name, rowVersion)
SELECT i.ID, i.name, i.rowversion
FROM INSERTED i
END
GO
INSERT INTO Table1 (name) VALUES('xxx'),('yyy')
SELECT * FROM Table1History
-----------------------------------------------
id name rowVersion
2 yyy 1
1 xxx 1
-----------------------------------------------
UPDATE Table1 SET name = 'xxx1' WHERE id = 1;
SELECT * FROM Table1History
-----------------------------------------------
id name rowVersion
2 yyy 1
1 xxx 1
1 xxx1 2
-----------------------------------------------
Basically you do not need the TRG_INS_Table1 trigger at all; you can just give the rowVersion column a DEFAULT value of 1 and that's it. Also, if you use a DATETIME column instead of rowVersion, you can just insert the state of the INSERTED table into the history table with the GETDATE() value. In that case you can order by that datetime column DESC and you have the history.
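A minimal sketch of that variant, with a datetime column in the history table alongside the version number (the LogDate column and trigger name are assumptions):

CREATE TABLE Table1
(
    id INT IDENTITY(1,1) PRIMARY KEY,
    name VARCHAR(250) NOT NULL UNIQUE,
    rowVersion INT NOT NULL DEFAULT (1)      -- no INSTEAD OF INSERT trigger needed
)
GO
CREATE TABLE Table1History
(
    id INT NOT NULL,
    name VARCHAR(250) NOT NULL,
    rowVersion INT NOT NULL,
    LogDate DATETIME NOT NULL DEFAULT (GETDATE())  -- ORDER BY LogDate DESC gives the history
)
GO
CREATE TRIGGER TRG_AFT_Table1
ON Table1
AFTER INSERT, UPDATE
AS
BEGIN
    INSERT INTO Table1History (id, name, rowVersion)
    SELECT i.id, i.name, i.rowVersion
    FROM INSERTED i
END
GO
-- with no INSTEAD OF trigger in the way, the usual identity functions work again
INSERT INTO Table1 (name) VALUES ('zzz');
SELECT SCOPE_IDENTITY() AS NewId;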

Inserting batch of rows into two tables in SQL Server 2008

I have a requirement to insert multiple rows into table1 and, at the same time, insert rows into table2 with the PK IDs from table1 and a value that comes from an SP parameter.
I created a stored procedure that performs a batch insert with a table-valued parameter containing the rows to be inserted into table1. But I have a problem inserting the rows into table2 with the corresponding identity IDs from table1, along with the parameter value that I passed.
Has anyone implemented this, or what is a good solution for it?
CREATE PROCEDURE [dbo].[oSP_TV_Insert]
    @uID int
    ,@IsActive int
    ,@Type int -- i need to insert this in table 2
    ,@dTableGroup table1 READONLY -- this one is a table valued
AS
DECLARE @SQL varchar(2000)
DECLARE @table1Id int
BEGIN
    INSERT INTO dbo.table1
        (uID
        ,Name
        ,Contact
        ,Address
        ,City
        ,State
        ,Zip
        ,Phone
        ,Active)
    SELECT
        @uID
        ,Name
        ,Contact
        ,Address
        ,City
        ,State
        ,Zip
        ,Phone
        ,Active
        ,@G_Active
    FROM @dTableGroup
    --the above query will perform batch insert using the records from @dTableGroup which is table valued
    SET @table1ID = SCOPE_IDENTITY()
    -- this below will perform inserting records to table2 with every Id inserted in table1.
    Insert into table2(@table1ID , @type)
You need to temporarily store the inserted identity values and then create a second INSERT statement - using the OUTPUT clause.
Something like:
-- declare table variable to hold the ID's that are being inserted
DECLARE @InsertedIDs TABLE (ID INT)
-- insert values into table1 - output the inserted ID's into @InsertedIDs
INSERT INTO dbo.table1(ID, Name, Contact, Address, City, State, Zip, Phone, Active)
OUTPUT INSERTED.ID INTO @InsertedIDs
SELECT
    @ID, Name, Contact, Address, City, State, Zip, Phone, Active, @G_Active
FROM @dTableGroup
and then you can have your second INSERT statement:
INSERT INTO dbo.table2(Table1ID, Type)
SELECT ID, @type FROM @InsertedIDs
See the MSDN docs on the OUTPUT clause for more details on what it can do - it's one of the most underused and least-known features of SQL Server these days!
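
For completeness, a hedged sketch of how the table-valued parameter type and the call might look; the question only shows the type name table1 in the procedure signature, so the column list and data types below are assumptions:

-- hypothetical user-defined table type matching the columns the procedure reads from @dTableGroup
CREATE TYPE dbo.table1 AS TABLE
(
    Name    VARCHAR(100),
    Contact VARCHAR(100),
    Address VARCHAR(200),
    City    VARCHAR(100),
    State   VARCHAR(2),
    Zip     VARCHAR(10),
    Phone   VARCHAR(20),
    Active  BIT
);
GO
-- calling the procedure with a couple of rows
DECLARE @rows dbo.table1;
INSERT INTO @rows (Name, Contact, Address, City, State, Zip, Phone, Active)
VALUES ('Acme', 'Jane', '1 Main St', 'Springfield', 'IL', '62701', '5551234567', 1),
       ('Widgets Inc', 'Bob', '2 Oak Ave', 'Shelbyville', 'IL', '62565', '5557654321', 1);

EXEC dbo.oSP_TV_Insert @uID = 42, @IsActive = 1, @Type = 3, @dTableGroup = @rows;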
Another approach, using the OUTPUT clause and only one statement to insert data into both destination tables:
--Parameters
DECLARE @TableGroup TABLE
(
    Name NVARCHAR(100) NOT NULL
    ,Phone VARCHAR(10) NOT NULL
);
DECLARE @Type INT;
--End of parameters

--Destination tables
DECLARE @FirstDestinationTable TABLE
(
    FirstDestinationTableID INT IDENTITY(1,1) PRIMARY KEY
    ,Name NVARCHAR(100) NOT NULL
    ,Phone VARCHAR(10) NOT NULL
);
DECLARE @SecondDestinationTable TABLE
(
    SecondDestinationTable INT IDENTITY(2,2) PRIMARY KEY
    ,FirstDestinationTableID INT NOT NULL
    ,[Type] INT NOT NULL
    ,CHECK([Type] > 0)
);
--End of destination tables

--Test1
--initialization
INSERT @TableGroup
VALUES ('Bogdan SAHLEAN', '0721200300')
,('Ion Ionescu', '0211002003')
,('Vasile Vasilescu', '0745600800');
SET @Type = 9;
--execution
INSERT @SecondDestinationTable (FirstDestinationTableID, [Type])
SELECT FirstINS.FirstDestinationTableID, @Type
FROM
(
    INSERT @FirstDestinationTable (Name, Phone)
    OUTPUT inserted.FirstDestinationTableID
    SELECT tg.Name, tg.Phone
    FROM @TableGroup tg
) FirstINS
--check records
SELECT *
FROM @FirstDestinationTable;
SELECT *
FROM @SecondDestinationTable;
--End of test1

--Test2
--initialization
DELETE @TableGroup;
DELETE @FirstDestinationTable;
DELETE @SecondDestinationTable;
INSERT @TableGroup
VALUES ('Ion Ionescu', '0210000000')
,('Vasile Vasilescu', '0745000000');
SET @Type = 0; --Wrong value
--execution
INSERT @SecondDestinationTable (FirstDestinationTableID, [Type])
SELECT FirstINS.FirstDestinationTableID, @Type
FROM
(
    INSERT @FirstDestinationTable (Name, Phone)
    OUTPUT inserted.FirstDestinationTableID
    SELECT tg.Name, tg.Phone
    FROM @TableGroup tg
) FirstINS
--check records
DECLARE @rc1 INT, @rc2 INT;
SELECT *
FROM @FirstDestinationTable;
SET @rc1 = @@ROWCOUNT;
SELECT *
FROM @SecondDestinationTable;
SET @rc2 = @@ROWCOUNT;
RAISERROR('[Test2 results] @FirstDestinationTable: %d rows; @SecondDestinationTable: %d rows;',1,1,@rc1,@rc2);
--End of test2
Since you need all inserted identity values, look at the output clause of the insert statement: http://msdn.microsoft.com/en-us/library/ms177564.aspx
