SQL Server - search comma separated list against all possible values

Scenario: I have an SSRS report which has a bunch of filters. One of those filters is for "Area"; I get the areas like so:
SELECT DISTINCT Area FROM tblArea ORDER BY Area
I have a default value of "All" for the parameter that gets its available values from that, so the output would be like:
All
Area1
Area2
China
US
etc
In the report they could select "All"; when that is passed to the main stored procedure, the parameter would look like:
All, Area1, Area2, China, US, etc
Now here is the issue: In the table which I query the Area column might have data like:
Area
--------
Area2,US
And the query is
SELECT * FROM tbl WHERE TPC IN (@Area) -- All,Area1,Area2,China,US,etc
And this would not find the record I am looking for.
In short, how do I compare each value in a comma separated column against a comma separated parameter?
Thanks in advance,

Your query is looking for a single value that has commas in it; IN doesn't parse the string.
You can do what you want using like:
where @Area = 'All' or
      ',' + TPC + ',' like '%,' + @Area + ',%';
The use of comma is to prevent collisions, when one area is contained in another (say, "US" and "USSR").
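As a minimal sketch (using hypothetical values, not the question's actual tables), the delimiters are what keep 'US' from matching inside 'USSR':
DECLARE @TPC varchar(50) = 'USSR,Area2';
DECLARE @Area varchar(10) = 'US';
-- ',USSR,Area2,' does not contain ',US,', so this returns 'no match'
SELECT CASE WHEN ',' + @TPC + ',' LIKE '%,' + @Area + ',%'
            THEN 'match' ELSE 'no match' END AS result;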

In SQL Server, you can create a table-valued function that converts the comma-delimited string @Area into a table. You can then query using this table.
ALTER FUNCTION [dbo].[List_to_tbl] (@list nvarchar(MAX))
RETURNS @tbl TABLE (stringsTable nvarchar(10) NOT NULL) AS
BEGIN
    DECLARE @pos int,
            @nextpos int,
            @valuelen int
    SELECT @pos = 0, @nextpos = 1
    WHILE @nextpos > 0
    BEGIN
        SELECT @nextpos = charindex(',', @list, @pos + 1)
        SELECT @valuelen = CASE WHEN @nextpos > 0
                                THEN @nextpos
                                ELSE len(@list) + 1
                           END - @pos - 1
        INSERT @tbl (stringsTable)
        VALUES (substring(@list, @pos + 1, @valuelen))
        SELECT @pos = @nextpos
    END
    RETURN
END
Now you can call this from your stored procedure:
Select * from TBL where TPC in (Select stringsTable from dbo.List_to_tbl(@Area))
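Assuming the function above has been created, a quick sanity check of what it returns for a short list:
SELECT stringsTable FROM dbo.List_to_tbl('All,Area1,Area2')
-- returns three rows: 'All', 'Area1', 'Area2'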

I think the example below will serve your purpose:
CREATE TABLE #TBL
([Id] int, [Areas] varchar(11))
;
INSERT INTO #TBL
([Id], [Areas])
VALUES
(1, 'abc,def,ghi'),
(2, 'abc,def'),
(3, 'abc')
;
DECLARE @Area VARCHAR(100) -- parameter
SET @Area = 'All,abc,def,ghi'
;with cte
as
(
select Areaxml.s.value('.','varchar(100)') as Area
from
(
select convert(xml,'<m>'+replace(@Area,',','</m><m>')+'</m>') as CArea
)t
cross apply
CArea.nodes('/m') as Areaxml(s)
)
select * FROM #TBL
where exists (select Area from cte where [Areas] LIKE '%'+ Area +'%')
DROP TABLE #TBL
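One caveat with that approach: LIKE '%' + Area + '%' matches substrings, so a value such as 'ab' in the parameter would also match an Areas entry containing 'abc'. A hedged variant of the final SELECT in the statement above, padding both sides with commas, avoids that collision:
select * FROM #TBL
where exists (select Area from cte
              where ',' + [Areas] + ',' LIKE '%,' + Area + ',%')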

Related

Concatenating with Cursor

I really want to learn and understand how to concatenate strings with the cursor approach.
Here is my table:
declare @t table (id int, city varchar(15))
insert into @t values
(1, 'Rome')
,(1, 'Dallas')
,(2, 'Berlin')
,(2, 'Rome')
,(2, 'Tokyo')
,(3, 'Miami')
,(3, 'Bergen')
I am trying to create a table that has all cities for each ID within one line sorted alphabetically.
ID City
1 Dallas, Rome
2 Berlin, Rome, Tokyo
3 Bergen, Miami
This is my code so far but it is not working and if somebody could walk me through each step I would be very happy and eager to learn it!
set nocount on
declare @tid int
declare @tcity varchar(15)
declare CityCursor CURSOR FOR
select * from @t
order by id, city
open CityCursor
fetch next from CityCursor into @tid, @tcity
while ( @@FETCH_STATUS = 0)
begin
    if @tid = @tid -- my idea add all cities in one line within each id
        print cast(@tid as varchar(2)) + ', ' + @tcity
    else if @tid <> @tid --when it reaches a new id and we went through all cities it starts over for the next line
        fetch next from CityCursor into @tid, @tcity
end
close CityCursor
deallocate CityCursor
select * from CityCursor
First, for future readers: a cursor, as Sean Lange wrote in his comment, is the wrong tool for this job. The correct way to do it is using a correlated subquery with FOR XML.
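A minimal sketch of that approach against the @t table variable from the question (assumed unchanged) could look like this:
select t.id,
       stuff((select ', ' + t2.city
              from @t t2
              where t2.id = t.id
              order by t2.city
              for xml path('')), 1, 2, '') as city
from @t t
group by t.id;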
However, since you wanted to know how to do it with a cursor, you were actually pretty close. Here is a working example:
set nocount on
declare @prevId int,
        @tid int,
        @tcity varchar(15)
declare @cursorResult table (id int, city varchar(32))
-- if you are expecting more than two cities for the same id,
-- the city column should be longer
declare CityCursor CURSOR FOR
select * from @t
order by id, city
open CityCursor
fetch next from CityCursor into @tid, @tcity
while ( @@FETCH_STATUS = 0)
begin
    if @prevId is null or @prevId != @tid
        insert into @cursorResult(id, city) values (@tid, @tcity)
    else
        update @cursorResult
        set city = city + ', ' + @tcity
        where id = @tid
    set @prevId = @tid
    fetch next from CityCursor into @tid, @tcity
end
close CityCursor
deallocate CityCursor
select * from @cursorResult
results:
id city
1 Dallas, Rome
2 Berlin, Rome, Tokyo
3 Bergen, Miami
I've used another variable to keep the previous id value, and also inserted the results of the cursor into a table variable.
I have written a nested cursor to sync with the distinct city ids. Although it has performance issues, you can try the following procedure:
CREATE PROCEDURE USP_CITY
AS
BEGIN
    set nocount on
    declare @mastertid int
    declare @detailstid int
    declare @tcity varchar(MAX)
    declare @finalCity varchar(MAX)
    SET @finalCity = ''
    declare @t table (id int, city varchar(max))
    insert into @t values
    (1, 'Rome')
    ,(1, 'Dallas')
    ,(2, 'Berlin')
    ,(2, 'Rome')
    ,(2, 'Tokyo')
    ,(3, 'Miami')
    ,(3, 'Bergen')
    declare @finaltable table (id int, city varchar(max))
    declare MasterCityCursor CURSOR FOR
    select distinct id from @t
    order by id
    open MasterCityCursor
    fetch next from MasterCityCursor into @mastertid
    while ( @@FETCH_STATUS = 0)
    begin
        declare DetailsCityCursor CURSOR FOR
        SELECT id, city from @t order by id
        open DetailsCityCursor
        fetch next from DetailsCityCursor into @detailstid, @tcity
        while ( @@FETCH_STATUS = 0)
        begin
            if @mastertid = @detailstid
            begin
                SET @finalCity = @finalCity + CASE @finalCity WHEN '' THEN '' ELSE ', ' END + @tcity
            end
            fetch next from DetailsCityCursor into @detailstid, @tcity
        end
        insert into @finaltable values(@mastertid, @finalCity)
        SET @finalCity = ''
        close DetailsCityCursor
        deallocate DetailsCityCursor
        fetch next from MasterCityCursor into @mastertid
    end
    close MasterCityCursor
    deallocate MasterCityCursor
    SELECT * FROM @finaltable
END
If you face any problems, feel free to write in the comment section. Thanks
Using a cursor for this is probably the slowest possible solution. If performance is important then there are three valid approaches. The first approach is FOR XML without special XML character protection.
declare @t table (id int, city varchar(15))
insert into @t values (1, 'Rome'),(1, 'Dallas'),(2, 'Berlin'),(2, 'Rome'),(2, 'Tokyo'),
(3, 'Miami'),(3, 'Bergen');
SELECT
    t.id,
    city = STUFF((
        SELECT ',' + t2.city
        FROM @t t2
        WHERE t.id = t2.id
        FOR XML PATH('')),1,1,'')
FROM @t as t
GROUP BY t.id;
The drawback to this approach is that when you add a reserved XML character such as &, <, or >, you will get an XML entity back (e.g. "&amp;" for "&"). To handle that you have to modify your query to look like this:
Sample data
IF OBJECT_ID('tempdb..#t') IS NOT NULL DROP TABLE #t;
CREATE TABLE #t (id int, words varchar(20))
INSERT #t VALUES (1, 'blah blah'),(1, 'yada yada'),(2, 'PB&J'),(2,' is good');
SELECT
t.id,
city = STUFF((
SELECT ',' + t2.words
FROM #t t2
WHERE t.id = t2.id
FOR XML PATH(''), TYPE).value('.','varchar(1000)'),1,1,'')
FROM #t as t
GROUP BY t.id;
The downside to this approach is that it will be slower. The good news (and another reason this approach is 100 times better than a cursor) is that both of these queries benefit greatly when the optimizer chooses a parallel execution plan.
The best approach is a new fabulous function available in SQL Server 2017, STRING_AGG. STRING_AGG does not have the problem with special XML characters and is, by far the cleanest approach:
SELECT t.id, STRING_AGG(t.words,',') WITHIN GROUP (ORDER BY t.id)
FROM #t as t
GROUP BY t.id;
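Since the original request was for the values within each id sorted alphabetically, a hedged tweak is to order the aggregation by the aggregated column itself rather than by id (still assuming SQL Server 2017+ and the #t sample table above):
SELECT t.id, STRING_AGG(t.words, ',') WITHIN GROUP (ORDER BY t.words) AS words
FROM #t as t
GROUP BY t.id;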

Insert Into Table with String Insert Or Table Type

I have a table variable called @Tbl1; each group in it describes one row that I have to extract into @Tbl_Insert.
Declare @Tbl1 Table (TableName NVARCHAR(250), ColumnName NVARCHAR(250), DataType NVARCHAR(250), DataValue NVARCHAR(250), InGroup NVARCHAR(250))
Declare @Tbl_Insert Table (ID INT, Name NVARCHAR(250), Age INT)
-- Sample Data
Insert Into @Tbl1 values ('@Tbl_Insert','ID','INT','1','Group1'),('@Tbl_Insert','Name','NVARCHAR(250)','John.Adam','Group1'),('@Tbl_Insert','Age','INT','10','Group1')
Insert Into @Tbl1 values ('@Tbl_Insert','ID','INT','2','Group2'),('@Tbl_Insert','Name','NVARCHAR(250)','Andy.Law','Group2'),('@Tbl_Insert','Age','INT','18','Group2')
I can convert @Tbl1 row by row into @Table_TEMP:
Declare @Table_TEMP Table (Data nvarchar(max))
Insert Into @Table_TEMP
SELECT LEFT([DataValues], LEN([DataValues]) - 1)
FROM @Tbl1 AS extern
CROSS APPLY
(
    SELECT Concat('''', Replace( ISNULL([DataValue],''), '''','' ) + ''',')
    FROM @Tbl1 AS intern
    WHERE extern.InGroup = intern.InGroup
    Order By InGroup, ColumnName
    FOR XML PATH('')
) pre_trimmed ( [DataValues])
GROUP BY InGroup, [DataValues]
I have to extract the rows in @Tbl1 (or @Table_TEMP) into @Tbl_Insert.
I don't want to use a cursor to loop and insert row by row from @Table_TEMP, because with big data (for example > 10000 rows) it runs too slowly.
Please help.
I found a sample on Stack Overflow:
Declare @tbl_Temp Table (Data NVARCHAR(MAX))
Declare @tbl2 Table (A NVARCHAR(MAX), B NVARCHAR(MAX), C NVARCHAR(MAX))
Insert Into @tbl_Temp values ('a1*b1*c1')
INSERT INTO @tbl2 (A,B,C)
SELECT PARSENAME(REPLACE(Data,'*','.'),3)
      ,PARSENAME(REPLACE(Data,'*','.'),2)
      ,PARSENAME(REPLACE(Data,'*','.'),1)
FROM @tbl_Temp
select * from @tbl2
It's nearly the same, but:
My data contains dots, so I cannot use PARSENAME.
I would have to know the number of dots to build dynamic SQL.
PARSENAME only supports 3 dots; it returns NULL when there are more.
EXAMPLE:
Declare @ObjectName nVarChar(1000)
Set @ObjectName = 'HeadOfficeSQL1.Northwind.dbo.Authors'
SELECT
    PARSENAME(@ObjectName, 5) as Server4,
    PARSENAME(@ObjectName, 4) as Server,
    PARSENAME(@ObjectName, 3) as DB,
    PARSENAME(@ObjectName, 2) as Owner,
    PARSENAME(@ObjectName, 1) as Object
If I understand correctly, you will need to use APPLY in order to fetch the records and insert the data into the other table:
insert into @Tbl_Insert (ID, Name, Age)
select max(a.id) [id], max(a.Name) [Name], max(a.Age) [Age] from @Tbl1 t
cross apply
(values
    (case when t.ColumnName = 'ID' then t.DataValue end,
     case when t.ColumnName = 'Name' then t.DataValue end,
     case when t.ColumnName = 'Age' then t.DataValue end, t.InGroup)
) as a(id, Name, Age, [Group])
group by a.[Group]
select * from @Tbl_Insert
I went with @Tbl_Insert and also created a function that works like PARSENAME. It improved performance.
create function dbo.fnGetCsvPart(@csv varchar(8000), @index tinyint, @last bit = 0)
returns varchar(4000)
as
/* function to retrieve 0 based "column" from csv string */
begin
    declare @i int; set @i = 0
    while 1 = 1
    begin
        if @index = 0
        begin
            if @last = 1 or charindex(',', @csv, @i+1) = 0
                return substring(@csv, @i+1, len(@csv) - @i + 1)
            else
                return substring(@csv, @i+1, charindex(',', @csv, @i+1) - @i - 1)
        end
        select @index = @index - 1, @i = charindex(',', @csv, @i+1)
        if @i = 0 break
    end
    return null
end
GO
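A quick usage check of the function (the column index is 0-based, and callers must pass DEFAULT for the optional third argument):
SELECT dbo.fnGetCsvPart('1,John.Adam,10', 0, DEFAULT) AS ID,   -- '1'
       dbo.fnGetCsvPart('1,John.Adam,10', 1, DEFAULT) AS Name, -- 'John.Adam'
       dbo.fnGetCsvPart('1,John.Adam,10', 2, DEFAULT) AS Age   -- '10'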

T-SQL - Capture data overflow errors

Aim - to capture data overflow errors from source to destination tables.
I have a source table that is being dumped with data, and the column data type is varchar,
while the destination table has specific column types like decimal(12, 5), int, etc.
Is there any way I can find all the rows that don't fit the spec and flag them, so that they are not part of the insert and hence do not cause the script to fail?
MS-SQL 2008 R2
Yes. You can do it by checking whether the value in the column of the source table is a number before inserting into the destination table, using ISNUMERIC in SQL Server.
SOURCE TABLE
CREATE TABLE #SOURCE(SCOL VARCHAR(300))
INSERT INTO #SOURCE
SELECT 'ABC'
UNION ALL
SELECT '121'
UNION ALL
SELECT '-145.78'
UNION ALL
SELECT '200,000'
UNION ALL
SELECT N'£100.20'
UNION ALL
SELECT '0E0'
UNION ALL
SELECT N'₤'
DESTINATION TABLE
CREATE TABLE #DESTINATION(DCOL NUMERIC(5,2))
-- Only the numeric values will be inserted and is Type-safe
INSERT INTO #DESTINATION
SELECT SCOL
FROM #SOURCE
WHERE ISNUMERIC(SCOL)=1
SELECT * FROM #DESTINATION
EDIT:
If ISNUMERIC doesn't satisfy the values in your column, create the function below and check the value of your column in the WHERE clause. I got the function from here.
CREATE FUNCTION dbo.isReallyNumeric
(
    @num VARCHAR(64)
)
RETURNS BIT
BEGIN
    IF LEFT(@num, 1) = '-'
        SET @num = SUBSTRING(@num, 2, LEN(@num))
    DECLARE @pos TINYINT
    SET @pos = 1 + LEN(@num) - CHARINDEX('.', REVERSE(@num))
    RETURN CASE
           WHEN PATINDEX('%[^0-9.-]%', @num) = 0
                AND @num NOT IN ('.', '-', '+', '^')
                AND LEN(@num) > 0
                AND @num NOT LIKE '%-%'
                AND
                (
                    ((@pos = LEN(@num) + 1)
                    OR @pos = CHARINDEX('.', @num))
                )
           THEN
               1
           ELSE
               0
           END
END
GO
And in the query to select and insert, use the above function
INSERT INTO #DESTINATION
SELECT SCOL
FROM #SOURCE
WHERE DBO.isReallyNumeric(SCOL)=1

SQL: iterating through a list that contains some ranges

I am trying to get information for products that have an ID that is contained in a list. The problem is that the list contains some single values and some range values:
PX03 - PX069, PX20, PX202, PX25 - PX270, PX250 - PX2509, PX251, PX2511 -
PX2513
Basically what I am looking for is some way to take a list or string containing both values and ranges and the end output is a table or list that has all of the values within the ranges individually so that I can loop through them.
I have a stored procedure that loops through all the ID's in the main products table that use the 'PX' prefix, but the table has all ids (i.e. PX 1 - 9999, LX 00001 - 99999) and I only want to search through those contained in the above list. I could write it out all the id's individually but some of the ranges contain many values, which would be time consuming to go through.
My idea was to create a separate table containing this list, in which there would be three columns: an identity column, and then one column each for the beginning and end of the range. Any items that do not have a range would just have the same value for beginning and end range, i.e.:
----------------------------------
rownum | range_start | range_end|
----------------------------------
1 PX03 PX069
2 PX20 PX20
3 PX202 PX202
4 PX25 PX25
5 PX250 PX2509
and then populating a table using something like:
SELECT id from product_table
WHERE id BETWEEN listtable.range_start AND listtable.range_end
where product_table is my original table with the product id's and their information and listtable is the new table I just created. This would give me:
id|
---
PX03
PX030
PX031
PX032
PX033
.
.
.
PX067
PX068
PX069
PX20
PX202
PX25
PX250
PX251
etc.
but I am thinking I would need to iterate through the list and I am not sure how to do that. Any ideas, hints or suggestions?
UPDATE
After creating the table using the solution given by @asantaballa, it was as simple as using an inner join:
SELECT d.id
FROM product_table d
INNER JOIN @RangeTable r
    ON d.id BETWEEN r.RangeFrom AND r.RangeTo
See if this works for you for the part about converting the string to a table.
Declare @StrList Varchar(1000) = 'PX03 - PX069, PX20, PX202, PX25 - PX270, PX250 - PX2509, PX251, PX2511 - PX2513'
Declare @RangeTable Table (RangeFrom VarChar(32), RangeTo VarChar(32))
Select @StrList = Replace(@StrList, ' ', '') + ','
Declare @StrListItem Varchar(32)
While CHARINDEX(',', @StrList) > 0
Begin
    Select @StrListItem = SUBSTRING(@StrList, 1, CHARINDEX(',', @StrList) - 1)
    Declare
        @RangeFrom VarChar(32)
        , @RangeTo VarChar(32)
    If CHARINDEX('-', @StrListItem) = 0
    Begin
        Select
            @RangeFrom = @StrListItem
            , @RangeTo = @StrListItem
    End
    Else
    Begin
        Select
            @RangeFrom = SUBSTRING(@StrListItem, 1, CHARINDEX('-', @StrListItem) - 1)
            , @RangeTo = SUBSTRING(@StrListItem, CHARINDEX('-', @StrListItem) + 1, LEN(@StrListItem) - CHARINDEX('-', @StrListItem))
    End
    Insert Into @RangeTable (RangeFrom, RangeTo) Values (@RangeFrom, @RangeTo)
    Select @StrList = SUBSTRING(@StrList, CHARINDEX(',', @StrList) + 1, LEN(@StrList) - CHARINDEX(',', @StrList))
End
Select * From @RangeTable
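For the sample string above, tracing the loop gives the following @RangeTable contents (single values simply repeat as both bounds):
RangeFrom  RangeTo
---------  -------
PX03       PX069
PX20       PX20
PX202      PX202
PX25       PX270
PX250      PX2509
PX251      PX251
PX2511     PX2513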
Here is your string and product_table
DECLARE @STR VARCHAR(100) = 'PX03 - PX069, PX20, PX202, PX25 - PX270, PX250 - PX2509, PX251, PX2511 - PX2513'
SELECT * INTO #product_table
FROM
(
SELECT 'PX4' PRODID
UNION ALL
SELECT 'PX26'
UNION ALL
SELECT 'PX75'
UNION ALL
SELECT 'PX77'
)TAB
Now create a table to hold the value
CREATE TABLE #listtable(ROWNUM int IDENTITY(1,1),range_start VARCHAR(100),range_end VARCHAR(100))
Now insert the split values into the table.
INSERT INTO #listtable
SELECT
ISNULL(PARSENAME(REPLACE(Split.a.value('.', 'VARCHAR(100)'),'-','.'),2),Split.a.value('.', 'VARCHAR(100)')) 'range_start' ,
PARSENAME(REPLACE(Split.a.value('.', 'VARCHAR(100)'),'-','.'),1) 'range_end'
FROM
(
SELECT CAST ('<M>' + REPLACE(@STR, ',', '</M><M>') + '</M>' AS XML) AS Data
) AS A
CROSS APPLY Data.nodes ('/M') AS Split(a)
Since the id is a string, you need a function to extract the numbers from it (a function created by the "God of SQL Server", Pinal Dave):
CREATE FUNCTION dbo.udf_GetNumeric
(@strAlphaNumeric VARCHAR(256))
RETURNS VARCHAR(256)
AS
BEGIN
    DECLARE @intAlpha INT
    SET @intAlpha = PATINDEX('%[^0-9]%', @strAlphaNumeric)
    BEGIN
        WHILE @intAlpha > 0
        BEGIN
            SET @strAlphaNumeric = STUFF(@strAlphaNumeric, @intAlpha, 1, '')
            SET @intAlpha = PATINDEX('%[^0-9]%', @strAlphaNumeric)
        END
    END
    RETURN ISNULL(@strAlphaNumeric, 0)
END
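A quick hedged check of what the function returns for one of the sample ids:
SELECT dbo.udf_GetNumeric('PX2509') AS NumericPart -- returns '2509'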
First of all, keep in mind that we will not get PX1, PX2, PX3, PX4 if you use id BETWEEN listtable.range_start AND listtable.range_end, because those are of varchar type and not numbers. So we need to extract the numbers from each PX value, generate the values between them, and add the PX prefix back.
Here is the query which filters the IDs in product_table that fall within the ranges in listtable:
;WITH CTE AS
(
SELECT ROWNUM,CAST(dbo.udf_GetNumeric(range_start)AS INT) NUMBERS,
CAST(dbo.udf_GetNumeric(range_end)AS INT) RTO1
FROM #listtable
UNION ALL
SELECT T.ROWNUM,NUMBERS+1,RTO1
FROM #listtable T
JOIN CTE ON CTE.ROWNUM = T.ROWNUM
WHERE NUMBERS < RTO1
)
SELECT PRODID IDS--,ROWNUM,NUMBERS NUMS,'PX'+CAST(NUMBERS AS VARCHAR(10)) IDS2
FROM CTE
JOIN #product_table ON PRODID='PX'+CAST(NUMBERS AS VARCHAR(10))
ORDER BY NUMBERS
option (MaxRecursion 0)
SQL FIDDLE

Using row count from a temporary table in a while loop SQL Server 2008

I'm trying to create a procedure in SQL Server 2008 that inserts data from a temp table into an already existing table. I think I've pretty much figured it out, I'm just having an issue with a loop. I need the row count from the temp table to determine when the loop should finish.
I've tried using @@ROWCOUNT in two different ways: using it by itself in the WHILE statement, and creating a variable to try and hold the value when the first loop has finished (see code below).
Neither of these methods has worked, and I'm now at a loss as to what to do. Is it possible to use @@ROWCOUNT in this situation, or is there another method that would work better?
CREATE PROCEDURE InsertData(@KeywordList varchar(max))
AS
BEGIN
    --create temp table to hold words and weights
    CREATE TABLE #tempKeywords(ID int NOT NULL, keyword varchar(10) NOT NULL);
    DECLARE @K varchar(10), @Num int, @ID int
    SET @KeywordList = LTRIM(RTRIM(@KeywordList)) + ','
    SET @Num = CHARINDEX(',', @KeywordList, 1)
    SET @ID = 0
    --Parse varchar and split IDs by comma into temp table
    IF REPLACE(@KeywordList, ',', '') <> ''
    BEGIN
        WHILE @Num > 0
        BEGIN
            SET @K = LTRIM(RTRIM(LEFT(@KeywordList, @Num - 1)))
            SET @ID = @ID + 1
            IF @K <> ''
            BEGIN
                INSERT INTO #tempKeywords VALUES (@ID, @K)
            END
            SET @KeywordList = RIGHT(@KeywordList, LEN(@KeywordList) - @Num)
            SET @Num = CHARINDEX(',', @KeywordList, 1)
            --rowcount of temp table
            SET @rowcount = @@ROWCOUNT
        END
    END
    --declaring variables for loop
    DECLARE @count INT
    DECLARE @t_name varchar(30)
    DECLARE @key varchar(30)
    DECLARE @key_weight DECIMAL(18,2)
    --setting count to start from first keyword
    SET @count = 2
    --setting the topic name as the first row in temp table
    SET @t_name = (Select keyword from #tempKeywords where ID = 1)
    --loop to insert data from temp table into Keyword table
    WHILE(@count < @rowcount)
    BEGIN
        SET @key = (SELECT keyword FROM #tempKeywords where ID = @count)
        SET @key_weight = (SELECT keyword FROM #tempKeywords where ID = @count+2)
        INSERT INTO Keyword(Topic_Name,Keyword,K_Weight)
        VALUES(@t_name, @key, @key_weight)
        SET @count = @count + 2
    END
--End stored procedure
END
To solve the second part of your problem:
INSERT INTO Keyword(Topic_Name,Keyword,K_Weight)
SELECT tk1.keyword, tk2.keyword, tk3.keyword
FROM
#tempKeywords tk1
cross join
#tempKeywords tk2
inner join
#tempKeywords tk3
on
tk2.ID = tk3.ID - 1
WHERE
tk1.ID = 1 AND
tk2.ID % 2 = 0
(This code should replace everything in your current script from the --declaring variables for loop comment onwards)
You could change:
WHILE(@count < @rowcount)
to
WHILE(@count < (select count(*) from #tempKeywords))
But like marc_s commented, you should be able to do this without a while loop.
I'd look at reworking your query to see if you can do this in a set-based way rather than row by row.
I'm not sure I follow exactly what you are trying to achieve, but I'd be tempted to look at the ROW_NUMBER() function to set the ID of your temp table. Used with a recursive CTE such as shown in this answer, you could get an id for each of your non-empty trimmed words. An example is something like:
DECLARE @KeywordList varchar(max) = 'TEST,WORD, ,,,LIST, SOME , WITH, SPACES'
CREATE TABLE #tempKeywords(ID int NOT NULL, keyword varchar(10) NOT NULL)
;WITH kws (ord, DataItem, Data) AS(
    SELECT CAST(1 AS INT), LEFT(@KeywordList, CHARINDEX(',', @KeywordList + ',') - 1),
        STUFF(@KeywordList, 1, CHARINDEX(',', @KeywordList + ','), '')
    union all
    select ord + 1, LEFT(Data, CHARINDEX(',', Data + ',') - 1),
        STUFF(Data, 1, CHARINDEX(',', Data + ','), '')
    from kws
    where Data > ''
), trimKws(ord1, trimkw) AS (
    SELECT ord, RTRIM(LTRIM(DataItem))
    FROM kws
)
INSERT INTO #tempKeywords (ID, keyword)
SELECT ROW_NUMBER() OVER (ORDER BY ord1) as OrderedWithoutSpaces, trimkw
FROM trimKws WHERE trimkw <> ''
SELECT * FROM #tempKeywords
I don't fully understand what you are trying to achieve with the second part of your query, but you could just build on this to get the remainder of it working. It certainly looks as though you could do what you are after without while statements at least.
