Concatenating with Cursor - sql-server

I really want to learn and understand how to concatenate strings with the cursor approach.
Here is my table:
declare @t table (id int, city varchar(15))
insert into @t values
(1, 'Rome')
,(1, 'Dallas')
,(2, 'Berlin')
,(2, 'Rome')
,(2, 'Tokyo')
,(3, 'Miami')
,(3, 'Bergen')
I am trying to create a table that has all cities for each ID within one line sorted alphabetically.
ID City
1 Dallas, Rome
2 Berlin, Rome, Tokyo
3 Bergen, Miami
This is my code so far, but it is not working. If somebody could walk me through each step I would be very happy and eager to learn!
set nocount on
declare @tid int
declare @tcity varchar(15)
declare CityCursor CURSOR FOR
select * from @t
order by id, city
open CityCursor
fetch next from CityCursor into @tid, @tcity
while ( @@FETCH_STATUS = 0)
begin
if @tid = @tid -- my idea add all cities in one line within each id
print cast(@tid as varchar(2)) + ', '+ @tcity
else if @tid <> @tid --when it reaches a new id and we went through all cities it starts over for the next line
fetch next from CityCursor into @tid, @tcity
end
close CityCursor
deallocate CityCursor
select * from CityCursor

First, for future readers: a cursor, as Sean Lange wrote in his comment, is the wrong tool for this job. The correct way to do it is using a subquery with FOR XML.
However, since you wanted to know how to do it with a cursor, you were actually pretty close. Here is a working example:
set nocount on
declare @prevId int,
@tid int,
@tcity varchar(15)
declare @cursorResult table (id int, city varchar(32))
-- if you are expecting more than two cities for the same id,
-- the city column should be longer
declare CityCursor CURSOR FOR
select * from @t
order by id, city
open CityCursor
fetch next from CityCursor into @tid, @tcity
while ( @@FETCH_STATUS = 0)
begin
if @prevId is null or @prevId != @tid
insert into @cursorResult(id, city) values (@tid, @tcity)
else
update @cursorResult
set city = city +', '+ @tcity
where id = @tid
set @prevId = @tid
fetch next from CityCursor into @tid, @tcity
end
close CityCursor
deallocate CityCursor
select * from @cursorResult
results:
id city
1 Dallas, Rome
2 Berlin, Rome, Tokyo
3 Bergen, Miami
I've used another variable to keep the previous id value, and also inserted the results of the cursor into a table variable.
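For reference, this is roughly what the FOR XML approach mentioned above looks like for the same @t table. This is a minimal sketch; the STUFF call just strips the leading separator, and the inner ORDER BY gives the alphabetical city order you asked for:
select t.id,
       city = stuff((select ', ' + t2.city
                     from @t t2
                     where t2.id = t.id
                     order by t2.city
                     for xml path('')), 1, 2, '')
from @t t
group by t.id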

I have written a nested cursor that walks the distinct IDs. Although it has performance issues, you can try the following procedure:
CREATE PROCEDURE USP_CITY
AS
BEGIN
set nocount on
declare @mastertid int
declare @detailstid int
declare @tcity varchar(MAX)
declare @finalCity varchar(MAX)
SET @finalCity = ''
declare @t table (id int, city varchar(max))
insert into @t values
(1, 'Rome')
,(1, 'Dallas')
,(2, 'Berlin')
,(2, 'Rome')
,(2, 'Tokyo')
,(3, 'Miami')
,(3, 'Bergen')
declare @finaltable table (id int, city varchar(max))
declare MasterCityCursor CURSOR FOR
select distinct id from @t
order by id
open MasterCityCursor
fetch next from MasterCityCursor into @mastertid
while ( @@FETCH_STATUS = 0)
begin
declare DetailsCityCursor CURSOR FOR
SELECT id,city from @t order by id
open DetailsCityCursor
fetch next from DetailsCityCursor into @detailstid,@tcity
while ( @@FETCH_STATUS = 0)
begin
if @mastertid = @detailstid
begin
SET @finalCity = @finalCity + CASE @finalCity WHEN '' THEN '' ELSE ', ' END + @tcity
end
fetch next from DetailsCityCursor into @detailstid, @tcity
end
insert into @finaltable values(@mastertid,@finalCity)
SET @finalCity = ''
close DetailsCityCursor
deallocate DetailsCityCursor
fetch next from MasterCityCursor into @mastertid
end
close MasterCityCursor
deallocate MasterCityCursor
SELECT * FROM @finaltable
END
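Once the procedure exists, you can run it with a plain call (it takes no parameters, since the sample data is declared inside it):
EXEC USP_CITY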
If you face any problems, feel free to write in the comment section. Thanks.

Using a cursor for this is probably the slowest possible solution. If performance is important then there are three valid approaches. The first approach is FOR XML without special XML character protection.
declare @t table (id int, city varchar(15))
insert into @t values (1, 'Rome'),(1, 'Dallas'),(2, 'Berlin'),(2, 'Rome'),(2, 'Tokyo'),
(3, 'Miami'),(3, 'Bergen');
SELECT
t.id,
city = STUFF((
SELECT ',' + t2.city
FROM @t t2
WHERE t.id = t2.id
FOR XML PATH('')),1,1,'')
FROM @t as t
GROUP BY t.id;
The drawback to this approach is that when you add a reserved XML character such as &, <, or >, you will get an XML entity back (e.g. "&amp;" for "&"). To handle that you have to modify your query to look like this:
Sample data
IF OBJECT_ID('tempdb..#t') IS NOT NULL DROP TABLE #t;
CREATE TABLE #t (id int, words varchar(20))
INSERT #t VALUES (1, 'blah blah'),(1, 'yada yada'),(2, 'PB&J'),(2,' is good');
SELECT
t.id,
city = STUFF((
SELECT ',' + t2.words
FROM #t t2
WHERE t.id = t2.id
FOR XML PATH(''), TYPE).value('.','varchar(1000)'),1,1,'')
FROM #t as t
GROUP BY t.id;
The downside to this approach is that it will be slower. The good news (and another reason this approach is 100 times better than a cursor) is that both of these queries benefit greatly when the optimizer chooses a parallel execution plan.
The best approach is a new fabulous function available in SQL Server 2017, STRING_AGG. STRING_AGG does not have the problem with special XML characters and is, by far the cleanest approach:
SELECT t.id, STRING_AGG(t.words,',') WITHIN GROUP (ORDER BY t.id)
FROM #t as t
GROUP BY t.id;
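Applied to the original city table from the question, a minimal sketch (assuming SQL Server 2017 or later and the @t table variable defined at the start of this answer) that also returns the cities in the alphabetical order the OP asked for would be:
SELECT t.id, STRING_AGG(t.city, ', ') WITHIN GROUP (ORDER BY t.city) AS city
FROM @t AS t
GROUP BY t.id;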

Related

Insert Into Table with String Insert Or Table Type

I have a table variable called @Tbl1. Each group in it represents one row, and I have to extract one row per group into the @Tbl_Insert type.
Declare @Tbl1 Table (TableName NVARCHAR(250),ColumnName NVARCHAR(250),DataType NVARCHAR(250),DataValue NVARCHAR(250),InGroup NVARCHAR(250))
Declare @Tbl_Insert Table (ID INT, Name NVARCHAR(250), Age INT)
-- Sample Data
Insert Into @Tbl1 values ('@Tbl_Insert','ID','INT','1','Group1'),('@Tbl_Insert','Name','NVARCHAR(250)','John.Adam','Group1'),('@Tbl_Insert','Age','INT','10','Group1')
Insert Into @Tbl1 values ('@Tbl_Insert','ID','INT','2','Group2'),('@Tbl_Insert','Name','NVARCHAR(250)','Andy.Law','Group2'),('@Tbl_Insert','Age','INT','18','Group2')
I can convert @Tbl1 row by row into @Table_TEMP:
Declare @Table_TEMP Table (Data nvarchar(max))
Insert Into @Table_TEMP
SELECT LEFT([DataValues], LEN([DataValues])-1)
FROM @Tbl1 AS extern
CROSS APPLY
(
SELECT Concat('''', Replace(ISNULL([DataValue],''), '''','' ) + ''',')
FROM @Tbl1 AS intern
WHERE extern.InGroup = intern.InGroup
Order By InGroup, ColumnName
FOR XML PATH('')
) pre_trimmed ( [DataValues])
GROUP BY InGroup, [DataValues]
I have to extract the rows from @Tbl1 (or @Table_TEMP) into @Tbl_Insert.
I don't want to use a cursor to insert row by row from @Table_TEMP, because with big data (for example > 10000 rows) it runs too slow.
Please help.
I found a sample on Stack Overflow:
Declare @tbl_Temp Table (Data NVARCHAR(MAX))
Declare @tbl2 Table (A NVARCHAR(MAX),B NVARCHAR(MAX),C NVARCHAR(MAX))
Insert Into @tbl_Temp values ('a1*b1*c1')
INSERT INTO @tbl2 (A,B,C)
SELECT PARSENAME(REPLACE(Data,'*','.'),3)
,PARSENAME(REPLACE(Data,'*','.'),2)
,PARSENAME(REPLACE(Data,'*','.'),1)
FROM @tbl_Temp
select * from @tbl2
It's nearly the same, but:
My data contains dots, so I cannot use PARSENAME directly.
I would have to know the number of dots to build dynamic SQL.
PARSENAME only supports 3 dots (4 parts); it returns NULL when there are more.
EXAMPLE:
Declare @ObjectName nVarChar(1000)
Set @ObjectName = 'HeadOfficeSQL1.Northwind.dbo.Authors'
SELECT
PARSENAME(@ObjectName, 5) as Server4,
PARSENAME(@ObjectName, 4) as Server,
PARSENAME(@ObjectName, 3) as DB,
PARSENAME(@ObjectName, 2) as Owner,
PARSENAME(@ObjectName, 1) as Object
If I understand correctly, you will need to use APPLY in order to fetch the records and insert the data into the other table:
insert into @Tbl_Insert (ID, Name, Age)
select max(a.id) [id], max(a.Name) [Name], max(a.Age) [Age] from @Tbl1 t
cross apply
(values
(case when t.ColumnName = 'ID' then t.DataValue end,
case when t.ColumnName = 'Name' then t.DataValue end,
case when t.ColumnName = 'Age' then t.DataValue end, t.InGroup)
) as a(id, Name, Age, [Group])
group by a.[Group]
select * from @Tbl_Insert
I use both: the insert into @Tbl_Insert above, and one function that works like PARSENAME. It improved performance.
create function dbo.fnGetCsvPart(@csv varchar(8000), @index tinyint, @last bit = 0)
returns varchar(4000)
as
/* function to retrieve 0 based "column" from csv string */
begin
declare @i int; set @i = 0
while 1 = 1
begin
if @index = 0
begin
if @last = 1 or charindex(',',@csv,@i+1) = 0
return substring(@csv,@i+1,len(@csv)-@i+1)
else
return substring(@csv,@i+1,charindex(',',@csv,@i+1)-@i-1)
end
select @index = @index-1, @i = charindex(',',@csv,@i+1)
if @i = 0 break
end
return null
end
GO
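A quick sanity check of the function (the input string here is just an example; the index is 0-based):
SELECT dbo.fnGetCsvPart('John.Adam,10,Group1', 1, 0) -- returns '10'
SELECT dbo.fnGetCsvPart('John.Adam,10,Group1', 2, 1) -- returns 'Group1'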

How to call a recursive function in sql server

I have a table as follows
cat_id Cat_Name Main_Cat_Id
1 veg null
2 main course 1
3 starter 1
4 Indian 2
5 mexican 2
6 tahi 3
7 chinese 3
8 nonveg null
9 main course 8
10 indian 9
11 starter 8
12 tahi 11
13 chinese 11
(Main_Cat_Id is the cat_id of the parent category it belongs to.)
This table stores the product categories: the veg category has two subcategories, main course and starter, identified by Main_Cat_Id,
and those subcategories again have subcategories such as indian and mexican.
This categorization depends on the user; he can add more subcategories under indian or mexican as well, so the nesting can go to any level.
Now I have to select all the subcategories of any node. For example, if I take veg, I have to select:
(1)veg > (2)main course(1) > (4)indian(2)
> (5)mexican(2)
> (3)starter(1) > (6)thai(3)
> (7)chinese(3)
to form the string as 1,2,4,5,3,6,7
To do this I wrote a SQL function:
CREATE FUNCTION [dbo].[GetSubCategory_TEST]
( @MainCategory int, @Category varchar(max))
RETURNS varchar(max)
AS
BEGIN
IF EXISTS (SELECT Cat_Id FROM Category WHERE Main_Cat_Id=@MainCategory)
BEGIN
DECLARE @TEMP TABLE
(
CAT_ID INT
)
INSERT INTO @TEMP(CAT_ID) SELECT Cat_Id FROM Category WHERE Main_Cat_Id=@MainCategory
DECLARE @TEMP_CAT_ID INT
DECLARE CUR_CAT_ID CURSOR FOR SELECT CAT_ID FROM @TEMP
OPEN CUR_CAT_ID
WHILE 1 =1
BEGIN
FETCH NEXT FROM CUR_CAT_ID
INTO @TEMP_CAT_ID;
IF @@FETCH_STATUS <> 0
SET @Category=@Category+','+ CONVERT(VARCHAR(50), @TEMP_CAT_ID)
SET @Category = [dbo].[GetSubCategory](@TEMP_CAT_ID,@Category)
END
CLOSE CUR_CAT_ID
DEALLOCATE CUR_CAT_ID
END
return @Category
END
But this function keeps on executing and does not give the desired output. I don't understand what is going wrong; please help me fix it.
You don't need a recursive function to build this; you can use a recursive CTE for that.
Something like
DECLARE @TABLE TABLE(
cat_id INT,
Cat_Name VARCHAR(50),
Main_Cat_Id INT
)
INSERT INTO @TABLE SELECT 1,'veg',null
INSERT INTO @TABLE SELECT 2,'main course',1
INSERT INTO @TABLE SELECT 3,'starter',1
INSERT INTO @TABLE SELECT 4,'Indian',2
INSERT INTO @TABLE SELECT 5,'mexican',2
INSERT INTO @TABLE SELECT 6,'tahi',3
INSERT INTO @TABLE SELECT 7,'chinese',3
INSERT INTO @TABLE SELECT 8,'nonveg',null
INSERT INTO @TABLE SELECT 9,'main course',8
INSERT INTO @TABLE SELECT 10,'indian',9
INSERT INTO @TABLE SELECT 11,'starter',8
INSERT INTO @TABLE SELECT 12,'tahi',11
INSERT INTO @TABLE SELECT 13,'chinese',11
;WITH Recursives AS (
SELECT *,
CAST(cat_id AS VARCHAR(MAX)) + '\' ID_Path
FROM @TABLE
WHERE Main_Cat_Id IS NULL
UNION ALL
SELECT t.*,
r.ID_Path + CAST(t.cat_id AS VARCHAR(MAX)) + '\'
FROM @TABLE t INNER JOIN
Recursives r ON t.Main_Cat_Id = r.cat_id
)
SELECT *
FROM Recursives
I am ashamed, but I used @astander's script to produce the string result.
First I created the data you gave.
Second, I collect the rows I need.
Then, using XML, I put everything in one row (the STUFF function removes the first comma):
DECLARE @TABLE TABLE(
cat_id INT,
Cat_Name VARCHAR(50),
Main_Cat_Id INT
)
DECLARE @Collected TABLE(
cat_id INT
)
INSERT INTO @TABLE SELECT 1,'veg',null
INSERT INTO @TABLE SELECT 2,'main course',1
INSERT INTO @TABLE SELECT 3,'starter',1
INSERT INTO @TABLE SELECT 4,'Indian',2
INSERT INTO @TABLE SELECT 5,'mexican',2
INSERT INTO @TABLE SELECT 6,'tahi',3
INSERT INTO @TABLE SELECT 7,'chinese',3
INSERT INTO @TABLE SELECT 8,'nonveg',null
INSERT INTO @TABLE SELECT 9,'main course',8
INSERT INTO @TABLE SELECT 10,'indian',9
INSERT INTO @TABLE SELECT 11,'starter',8
INSERT INTO @TABLE SELECT 12,'tahi',11
INSERT INTO @TABLE SELECT 13,'chinese',11
INSERT INTO @TABLE SELECT 14,'chinese',6
DECLARE @nodeID INT = 1;
DECLARE @result VARCHAR(MAX);
;WITH Recursives AS (
SELECT cat_id, main_cat_id
FROM @TABLE
WHERE Cat_Id = @nodeID
UNION ALL
SELECT T.cat_id, T.main_cat_id
FROM @TABLE AS T
INNER JOIN Recursives AS R
ON T.Main_Cat_Id = R.cat_id
)
INSERT INTO @Collected
SELECT cat_id
FROM Recursives
SELECT @result = STUFF(
(SELECT ',' + CAST( cat_id AS VARCHAR)
FROM @Collected
ORDER BY cat_id
FOR XML PATH('')
), 1,1,'')
SELECT @result
Your cursor is looping infinitely because you asked it to keep going until 1 no longer equals 1:
WHILE 1 =1
1=1 is always true so the loop never ends, and you don't explicitly break out of it anywhere.
You would do well to study some examples of cursors, for example this one in the Microsoft T-SQL documentation. They are quite formulaic and the main syntax rarely needs to vary much.
The standard approach after opening the cursor is to do an initial fetch next to get the first result, then open a while loop conditional on @@FETCH_STATUS = 0 (0 meaning successful).
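A minimal skeleton of that pattern, using the Category table from the question (the processing step is just a placeholder):
DECLARE @id INT;
DECLARE c CURSOR LOCAL FOR SELECT cat_id FROM Category;
OPEN c;
FETCH NEXT FROM c INTO @id;
WHILE @@FETCH_STATUS = 0
BEGIN
    -- process @id here
    FETCH NEXT FROM c INTO @id;
END
CLOSE c;
DEALLOCATE c;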
Because you're looking only for unsuccessful cursor fetch states inside your cursor:
IF @@FETCH_STATUS <> 0
The setting of @Category will only happen once the cursor has gone past the last row in the set. I suspect this is exactly what you don't want.
I'm also not sure about the scoping of the @Category variable, since it's an input parameter to the function; I generally create new variables inside a function to work with, but off the top of my head I'm not sure whether this will actually create a problem or not.
More generally, although I don't totally understand what you're trying to achieve here, a recursive function involving a cursor is probably not the right way to do it, as Adriaan Stander's answer suggests.

SQL Server - search comma separated list against all possible values

Scenario: I have a SSRS report which has a bunch of filters. One of those filters is for "Area", I get the areas like so:
SELECT DISTINCT Area FROM tblArea ORDER BY Area
I have a default value of "All" for the parameter that gets its available values from that, so the output would be like:
All
Area1
Area2
China
US
etc
In the report they could select "All" which when passed to the main stored procedure the parameter would look like:
All, Area1, Area2, China, US, etc
Now here is the issue: In the table which I query the Area column might have data like:
Area
--------
Area2,US
And the query is
SELECT * FROM tbl WHERE TPC IN (@Area) -- All,Area1,Area2,China,US,etc
And this would not find the record I am looking for.
In short, how do I compare each value in a comma separated column against a comma separated parameter?
Thanks in advance,
Your query is looking for a single value that has commas in it. The IN doesn't parse the string.
You can do what you want using like:
where @Area = 'All' or
','+TPC+',' like '%,'+@Area+',%';
The use of comma is to prevent collisions, when one area is contained in another (say, "US" and "USSR").
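To see why the wrapping commas matter, here is a small illustrative check (literal values stand in for the TPC column and the parameter):
-- plain LIKE would also match 'USSR':
SELECT CASE WHEN 'Area2,USSR' LIKE '%US%' THEN 1 ELSE 0 END                     -- 1 (false positive)
-- the comma-wrapped version only matches a whole list element:
SELECT CASE WHEN ',' + 'Area2,USSR' + ',' LIKE '%,US,%' THEN 1 ELSE 0 END       -- 0
SELECT CASE WHEN ',' + 'Area2,US' + ',' LIKE '%,US,%' THEN 1 ELSE 0 END         -- 1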
In SQL, you can create a table-valued function that converts the comma-delimited string @Area into a table. You can then query against that table.
ALTER FUNCTION [dbo].[List_to_tbl] (@list nvarchar(MAX))
RETURNS @tbl TABLE (stringsTable nvarchar(10) NOT NULL) AS
BEGIN
DECLARE @pos int,
@nextpos int,
@valuelen int
SELECT @pos = 0, @nextpos = 1
WHILE @nextpos > 0
BEGIN
SELECT @nextpos = charindex(',', @list, @pos + 1)
SELECT @valuelen = CASE WHEN @nextpos > 0
THEN @nextpos
ELSE len(@list) + 1
END - @pos - 1
INSERT @tbl (stringsTable)
VALUES (substring(@list, @pos + 1, @valuelen))
SELECT @pos = @nextpos
END
RETURN
END
Now you can call this from your stored procedure:
Select * from TBL where TPC in (Select stringsTable from dbo.List_to_tbl(@Area))
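As a side note, on SQL Server 2016 or later you could use the built-in STRING_SPLIT instead of a hand-rolled splitter. A sketch, assuming database compatibility level 130 or higher (LTRIM handles any spaces after the commas):
Select * from TBL where TPC in (Select LTRIM(value) from STRING_SPLIT(@Area, ','))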
I think the example below will solve your purpose:
CREATE TABLE #TBL
([Id] int, [Areas] varchar(11))
;
INSERT INTO #TBL
([Id], [Areas])
VALUES
(1, 'abc,def,ghi'),
(2, 'abc,def'),
(3, 'abc')
;
DECLARE @Area VARCHAR(100) -- parameter
SET @Area = 'All,abc,def,ghi'
;with cte
as
(
select Areaxml.s.value('.','varchar(100)') as Area
from
(
select convert(xml,'<m>'+replace(@Area,',','</m><m>')+'</m>') as CArea
)t
cross apply
CArea.nodes('/m') as Areaxml(s)
)
select * FROM #TBL
where exists (select Area from cte where [Areas] LIKE '%'+ Area +'%')
DROP TABLE #TBL

How to do Fold in T-SQL?

If I have data in the following format
id subid text
1 1 Hello
1 2 World
1 3 !
2 1 B
2 2 B
2 3 Q
And would like it in this format:
id fold
1 HelloWorld!
2 BBQ
How could I accomplish it in T-SQL?
I would strongly suggest against that. That is the sort of thing that should be handled in your application layer.
But... if you must:
Concatenating Row Values in Transact-SQL
a temp table and a cursor leap to mind...
Dear Downvoters: a temp table and a cursor have got to be at least as efficient as the recursive-query and custom-function solutions accepted above. Get over your fear of cursors, sometimes they are the most efficient solution. Sometimes they are the only solution. Deal with it.
EDIT: cursor-based solution below. Note that it has none of the limitations of the non-cursor (and more complicated) solutions proposed elsewhere, and performance is probably about the same (hard to tell from a six-row table of course).
and please, don't abandon the main for-each construct of sql just because some blogger says "it's bad"; use your own judgement and some common sense. I avoid cursors whenever possible, but not to the point where the solution is not robust.
--initial data table
create table #tmp (
id int,
subid int,
txt varchar(256)
)
--populate with sample data from original question
insert into #tmp (id,subid,txt) values (1, 1, 'Hello')
insert into #tmp (id,subid,txt) values (1, 2, 'World')
insert into #tmp (id,subid,txt) values (1, 3, '!')
insert into #tmp (id,subid,txt) values (2, 1, 'B')
insert into #tmp (id,subid,txt) values (2, 2, 'B')
insert into #tmp (id,subid,txt) values (2, 3, 'Q')
--temp table for grouping results
create table #tmpgrp (
id int,
txt varchar(4000)
)
--cursor for looping through data
declare cur cursor local for
select id, subid, txt from #tmp order by id, subid
declare @id int
declare @subid int
declare @txt varchar(256)
declare @curid int
declare @curtxt varchar(4000)
open cur
fetch next from cur into @id, @subid, @txt
set @curid = @id
set @curtxt = ''
while @@FETCH_STATUS = 0 begin
if @curid <> @id begin
insert into #tmpgrp (id,txt) values (@curid,@curtxt)
set @curid = @id
set @curtxt = ''
end
set @curtxt = @curtxt + isnull(@txt,'')
fetch next from cur into @id, @subid, @txt
end
insert into #tmpgrp (id,txt) values (@curid,@curtxt)
close cur
deallocate cur
--show output
select * from #tmpgrp
--drop temp tables
drop table #tmp
drop table #tmpgrp
declare @tmp table (id int, subid int, txt varchar(256))
--populate with sample data from original question
insert into @tmp (id,subid,txt) values (1, 1, 'Hello')
insert into @tmp (id,subid,txt) values (1, 2, 'World')
insert into @tmp (id,subid,txt) values (1, 3, '!')
insert into @tmp (id,subid,txt) values (2, 1, 'B')
insert into @tmp (id,subid,txt) values (2, 2, 'B')
insert into @tmp (id,subid,txt) values (2, 3, 'Q')
Solution
Select id, fold = (Select cast(txt as varchar(100)) from @tmp t2 where t1.id = t2.id for xml path(''))
from @tmp t1
group by t1.id
Wrap this in a function for a single execution...
DECLARE @returnValue varchar(4000)
SELECT @returnValue = ISNULL(@returnValue + ', ' + myTable.text, myTable.text)
FROM myTable
RETURN @returnValue
For a small number of records this will work... any more than 5 or 10 is too many for a SQL function and it needs to be moved to app layer as others have suggested.
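For illustration, a complete wrapper might look like the sketch below. The name dbo.fnConcatText is made up here, and it reads from the permanent table myTable used in the snippet above (temp tables and table variables declared outside a function are not visible inside it):
CREATE FUNCTION dbo.fnConcatText (@id int)
RETURNS varchar(4000)
AS
BEGIN
    DECLARE @returnValue varchar(4000)
    -- concatenate every [text] value for the given id, separated by ', '
    SELECT @returnValue = ISNULL(@returnValue + ', ' + myTable.[text], myTable.[text])
    FROM myTable
    WHERE myTable.id = @id
    RETURN @returnValue
END
You would then call it as: SELECT id, dbo.fnConcatText(id) AS fold FROM myTable GROUP BY id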

Is there a way to loop through a table variable in TSQL without using a cursor?

Let's say I have the following simple table variable:
declare @databases table
(
DatabaseID int,
Name varchar(15),
Server varchar(15)
)
-- insert a bunch rows into @databases
Is declaring and using a cursor my only option if I wanted to iterate through the rows? Is there another way?
First of all you should be absolutely sure you need to iterate through each row — set based operations will perform faster in every case I can think of and will normally use simpler code.
Depending on your data it may be possible to loop using just SELECT statements as shown below:
Declare @Id int
While (Select Count(*) From ATable Where Processed = 0) > 0
Begin
Select Top 1 @Id = Id From ATable Where Processed = 0
--Do some processing here
Update ATable Set Processed = 1 Where Id = @Id
End
Another alternative is to use a temporary table:
Select *
Into #Temp
From ATable
Declare @Id int
While (Select Count(*) From #Temp) > 0
Begin
Select Top 1 @Id = Id From #Temp
--Do some processing here
Delete #Temp Where Id = @Id
End
The option you should choose really depends on the structure and volume of your data.
Note: If you are using SQL Server you would be better served using:
WHILE EXISTS(SELECT * FROM #Temp)
Using COUNT will have to touch every single row in the table, the EXISTS only needs to touch the first one (see Josef's answer below).
Just a quick note, if you are using SQL Server (2008 and above), the examples that have:
While (Select Count(*) From #Temp) > 0
Would be better served with
While EXISTS(SELECT * From #Temp)
The Count will have to touch every single row in the table, the EXISTS only needs to touch the first one.
This is how I do it:
declare @RowNum int, @CustId nchar(5), @Name1 nchar(25)
select @CustId=MAX(USERID) FROM UserIDs --start with the highest ID
Select @RowNum = Count(*) From UserIDs --get total number of records
WHILE @RowNum > 0 --loop until no more records
BEGIN
select @Name1 = username1 from UserIDs where USERID = @CustId --get other info from that row
print cast(@RowNum as char(12)) + ' ' + @CustId + ' ' + @Name1 --do whatever
select top 1 @CustId=USERID from UserIDs where USERID < @CustId order by USERID desc --get the next one
set @RowNum = @RowNum - 1 --decrease count
END
No Cursors, no temporary tables, no extra columns.
The USERID column must be a unique integer, as most Primary Keys are.
Define your table variable like this -
declare @databases table
(
RowID int not null identity(1,1) primary key,
DatabaseID int,
Name varchar(15),
Server varchar(15)
)
-- insert a bunch rows into @databases
Then do this -
declare @i int
select @i = min(RowID) from @databases
declare @max int
select @max = max(RowID) from @databases
while @i <= @max begin
select DatabaseID, Name, Server from @databases where RowID = @i --do some stuff
set @i = @i + 1
end
Here is how I would do it:
Select Identity(int, 1,1) AS PK, DatabaseID
Into #T
From @databases
Declare @maxPK int;Select @maxPK = MAX(PK) From #T
Declare @pk int;Set @pk = 1
While @pk <= @maxPK
Begin
-- Get one record
Select DatabaseID, Name, Server
From @databases
Where DatabaseID = (Select DatabaseID From #T Where PK = @pk)
--Do some processing here
--
Select @pk = @pk + 1
End
[Edit] Because I probably skipped the word "variable" when I first read the question, here is an updated response...
declare @databases table
(
PK int IDENTITY(1,1),
DatabaseID int,
Name varchar(15),
Server varchar(15)
)
-- insert a bunch rows into @databases
--/*
INSERT INTO @databases (DatabaseID, Name, Server) SELECT 1,'MainDB', 'MyServer'
INSERT INTO @databases (DatabaseID, Name, Server) SELECT 1,'MyDB', 'MyServer2'
--*/
Declare @maxPK int;Select @maxPK = MAX(PK) From @databases
Declare @pk int;Set @pk = 1
While @pk <= @maxPK
Begin
/* Get one record (you can read the values into some variables) */
Select DatabaseID, Name, Server
From @databases
Where PK = @pk
/* Do some processing here */
/* ... */
Select @pk = @pk + 1
End
If you have no choice but to go row by row, create a FAST_FORWARD cursor. It will be as fast as building up a while loop and much easier to maintain over the long haul.
FAST_FORWARD
Specifies a FORWARD_ONLY, READ_ONLY cursor with performance optimizations enabled. FAST_FORWARD cannot be specified if SCROLL or FOR_UPDATE is also specified.
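For example, a minimal FAST_FORWARD sketch over the @databases table variable from the question (the processing step is a placeholder):
DECLARE @DatabaseID int, @Name varchar(15), @Server varchar(15);
DECLARE db_cursor CURSOR LOCAL FAST_FORWARD FOR
    SELECT DatabaseID, Name, Server FROM @databases;
OPEN db_cursor;
FETCH NEXT FROM db_cursor INTO @DatabaseID, @Name, @Server;
WHILE @@FETCH_STATUS = 0
BEGIN
    -- do some processing here
    FETCH NEXT FROM db_cursor INTO @DatabaseID, @Name, @Server;
END
CLOSE db_cursor;
DEALLOCATE db_cursor;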
This will work in SQL Server 2012 and later.
declare @Rowcount int
select @Rowcount = count(*) from AddressTable;
while (@Rowcount > 0)
begin
select @Rowcount = @Rowcount - 1;
SELECT * FROM AddressTable order by AddressId desc OFFSET @Rowcount ROWS FETCH NEXT 1 ROWS ONLY;
end
Another approach without having to change your schema or using temp tables:
DECLARE @rowCount int = 0
,@currentRow int = 1
,@databaseID int
,@name varchar(15)
,@server varchar(15);
SELECT @rowCount = COUNT(*)
FROM @databases;
WHILE (@currentRow <= @rowCount)
BEGIN
SELECT TOP 1
@databaseID = rt.[DatabaseID]
,@name = rt.[Name]
,@server = rt.[Server]
FROM (
SELECT ROW_NUMBER() OVER (
ORDER BY t.[DatabaseID], t.[Name], t.[Server]
) AS [RowNumber]
,t.[DatabaseID]
,t.[Name]
,t.[Server]
FROM @databases t
) rt
WHERE rt.[RowNumber] = @currentRow;
EXEC [your_stored_procedure] @databaseID, @name, @server;
SET @currentRow = @currentRow + 1;
END
You can use a while loop:
While (Select Count(*) From #TempTable) > 0
Begin
Insert Into #Databases...
Delete From #TempTable Where x = x
End
Lightweight, without having to make extra tables, if you have an integer ID on the table
Declare @id int = 0, @anything nvarchar(max)
WHILE(1=1) BEGIN
Select Top 1 @anything=[Anything],@id=@id+1 FROM Table WHERE ID>@id
if(@@ROWCOUNT=0) break;
--Process @anything
END
I really do not see why you would need to resort to using the dreaded cursor.
But here is another option if you are using SQL Server version 2005/2008
Use Recursion
declare @databases table
(
DatabaseID int,
Name varchar(15),
Server varchar(15)
)
--; Insert records into @databases...
--; Recurse through @databases
;with DBs as (
select * from @databases where DatabaseID = 1
union all
select A.* from @databases A
inner join DBs B on A.DatabaseID = B.DatabaseID + 1
)
select * from DBs
-- [PO_RollBackOnReject] 'FININV10532'
alter procedure PO_RollBackOnReject
@CaseID nvarchar(100)
AS
Begin
SELECT *
INTO #tmpTable
FROM PO_InvoiceItems where CaseID = @CaseID
Declare @Id int
Declare @PO_No int
Declare @Current_Balance Money
While (Select ROW_NUMBER() OVER(ORDER BY PO_LineNo DESC) From #tmpTable) > 0
Begin
Select Top 1 @Id = PO_LineNo, @Current_Balance = Current_Balance,
@PO_No = PO_No
From #tmpTable
update PO_Details
Set Current_Balance = Current_Balance + @Current_Balance,
Previous_App_Amount= Previous_App_Amount + @Current_Balance,
Is_Processed = 0
Where PO_LineNumber = @Id
AND PO_No = @PO_No
update PO_InvoiceItems
Set IsVisible = 0,
Is_Processed= 0
,Is_InProgress = 0 ,
Is_Active = 0
Where PO_LineNo = @Id
AND PO_No = @PO_No
End
End
It's possible to use a cursor to do this:
create function [dbo].[f_teste_loop]()
returns @tabela table
(
cod int,
nome varchar(10)
)
as
begin
insert into @tabela values (1, 'verde');
insert into @tabela values (2, 'amarelo');
insert into @tabela values (3, 'azul');
insert into @tabela values (4, 'branco');
return;
end
GO
create procedure [dbo].[sp_teste_loop]
as
begin
DECLARE @cod int, @nome varchar(10);
DECLARE curLoop CURSOR STATIC LOCAL
FOR
SELECT
cod
,nome
FROM
dbo.f_teste_loop();
OPEN curLoop;
FETCH NEXT FROM curLoop
INTO @cod, @nome;
WHILE (@@FETCH_STATUS = 0)
BEGIN
PRINT @nome;
FETCH NEXT FROM curLoop
INTO @cod, @nome;
END
CLOSE curLoop;
DEALLOCATE curLoop;
end
I'm going to provide the set-based solution.
insert @databases (DatabaseID, Name, Server)
select DatabaseID, Name, Server
From ... (Use whatever query you would have used in the loop or cursor)
This is far faster than any looping technique and is easier to write and maintain.
I prefer using the Offset Fetch if you have a unique ID you can sort your table by:
DECLARE @TableVariable TABLE (ID int, Name varchar(50));
DECLARE @RecordCount int;
SELECT @RecordCount = COUNT(*) FROM @TableVariable;
WHILE @RecordCount > 0
BEGIN
SELECT ID, Name FROM @TableVariable ORDER BY ID OFFSET @RecordCount - 1 ROWS FETCH NEXT 1 ROWS ONLY;
SET @RecordCount = @RecordCount - 1;
END
This way I don't need to add fields to the table or use a window function.
I agree with the previous post that set-based operations will typically perform better, but if you do need to iterate over the rows here's the approach I would take:
Add a new field to your table variable (Data Type Bit, default 0)
Insert your data
Select the Top 1 Row where fUsed = 0 (Note: fUsed is the name of the field in step 1)
Perform whatever processing you need to do
Update the record in your table variable by setting fUsed = 1 for the record
Select the next unused record from the table and repeat the process
DECLARE @databases TABLE
(
DatabaseID int,
Name varchar(15),
Server varchar(15),
fUsed BIT DEFAULT 0
)
-- insert a bunch rows into @databases
DECLARE @DBID INT
SELECT TOP 1 @DBID = DatabaseID from @databases where fUsed = 0
WHILE @@ROWCOUNT <> 0 and @DBID IS NOT NULL
BEGIN
-- Perform your processing here
--Update the record to "used"
UPDATE @databases SET fUsed = 1 WHERE DatabaseID = @DBID
--Get the next record
SELECT TOP 1 @DBID = DatabaseID from @databases where fUsed = 0
END
Step 1: The select statement below creates a temp table with a unique row number for each record.
select eno, ename, eaddress, mobno, row_number() over(order by eno desc) as rno into #tmp_sri from emp
Step 2: Declare the required variables.
DECLARE @ROWNUMBER INT
DECLARE @ename varchar(100)
Step 3: Get the total row count from the temp table.
SELECT @ROWNUMBER = COUNT(*) FROM #tmp_sri
declare @rno int
Step 4: Loop over the temp table based on the unique row number created above.
while @rownumber > 0
begin
set @rno = @rownumber
select @ename = ename from #tmp_sri where rno = @rno -- you can read as many columns as you need here
set @rownumber = @rownumber - 1
print @ename -- instead of printing, you can run insert, update, or delete statements
end
This approach only requires one variable and does not delete any rows from @databases. I know there are a lot of answers here, but I don't see one that uses MIN to get your next ID like this.
DECLARE @databases TABLE
(
DatabaseID int,
Name varchar(15),
Server varchar(15)
)
-- insert a bunch rows into @databases
DECLARE @CurrID INT
SELECT @CurrID = MIN(DatabaseID)
FROM @databases
WHILE @CurrID IS NOT NULL
BEGIN
-- Do stuff for @CurrID
SELECT @CurrID = MIN(DatabaseID)
FROM @databases
WHERE DatabaseID > @CurrID
END
Here's my solution, which makes use of an infinite loop, the BREAK statement, and the @@ROWCOUNT function. No cursors or temporary tables are necessary, and I only need to write one query to get the next row in the @databases table:
declare @databases table
(
DatabaseID int,
[Name] varchar(15),
[Server] varchar(15)
);
-- Populate the @databases table with test data.
insert into @databases (DatabaseID, [Name], [Server])
select X.DatabaseID, X.[Name], X.[Server]
from (values
(1, 'Roger', 'ServerA'),
(5, 'Suzy', 'ServerB'),
(8675309, 'Jenny', 'TommyTutone')
) X (DatabaseID, [Name], [Server])
-- Create an infinite loop & ensure that a break condition is reached in the loop code.
declare @databaseId int;
while (1=1)
begin
-- Get the next database ID.
select top(1) @databaseId = DatabaseId
from @databases
where DatabaseId > isnull(@databaseId, 0);
-- If no rows were found by the preceding SQL query, you're done; exit the WHILE loop.
if (@@ROWCOUNT = 0) break;
-- Otherwise, do whatever you need to do with the current @databases table row here.
print 'Processing @databaseId #' + cast(@databaseId as varchar(50));
end
This is the code that I am using on 2008 R2. It builds indexes on the key fields (SSNO & EMPR_NO) in all tables.
if object_ID('tempdb..#a')is not NULL drop table #a
select 'IF EXISTS (SELECT name FROM sysindexes WHERE name ='+CHAR(39)+''+'IDX_'+COLUMN_NAME+'_'+SUBSTRING(table_name,5,len(table_name)-3)+char(39)+')'
+' begin DROP INDEX [IDX_'+COLUMN_NAME+'_'+SUBSTRING(table_name,5,len(table_name)-3)+'] ON '+table_schema+'.'+table_name+' END Create index IDX_'+COLUMN_NAME+'_'+SUBSTRING(table_name,5,len(table_name)-3)+ ' on '+ table_schema+'.'+table_name+' ('+COLUMN_NAME+') ' 'Field'
,ROW_NUMBER() over (order by table_NAMe) as 'ROWNMBR'
into #a
from INFORMATION_SCHEMA.COLUMNS
where (COLUMN_NAME like '%_SSNO_%' or COLUMN_NAME like'%_EMPR_NO_')
and TABLE_SCHEMA='dbo'
declare @loopcntr int
declare @ROW int
declare @String nvarchar(1000)
set @loopcntr = (select count(*) from #a)
set @ROW = 1
while (@ROW <= @loopcntr)
begin
select top 1 @String = a.Field
from #a a
where a.ROWNMBR = @ROW
execute sp_executesql @String
set @ROW = @ROW + 1
end
SELECT @pk = @pk + 1
would be better written as:
SET @pk += 1
Avoid using SELECT if you are not referencing tables and are just assigning values.
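A quick side-by-side of the two assignment forms (both leave @pk incremented by one):
DECLARE @pk int = 1
SELECT @pk = @pk + 1 -- works, but SELECT is intended for reading from tables
SET @pk += 1         -- plain variable assignment; the compound operator requires SQL Server 2008 or later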
