I want to extract the integers from rather complex strings that combine characters such as - and . with letters and digits.
I've come a long way in achieving this, but I still have trouble with some strings of a more complex structure. For instance:
DECLARE @Tabl as table
(
dats nvarchar(15)
)
INSERT INTO @Tabl VALUES
('103-P705hh'),
('115-xxx-44'),
('103-705.13'),
('525-hheef4')
select LEFT(SUBSTRING(REPLACE(REPLACE(dats, '.',''),'-',''), PATINDEX('%[0-9.-]%', REPLACE(REPLACE(dats, '.',''),'-','')), 8000),
PATINDEX('%[^0-9.-]%', SUBSTRING(REPLACE(REPLACE(dats, '.',''),'-',''), PATINDEX('%[0-9.-]%', REPLACE(REPLACE(dats, '.',''),'-','')), 8000) + 'X')-1)
from @Tabl
Gives
Raw Input    Actual return   Desired return
----------   -------------   --------------
103-P705hh   103             103705
115-xxx-44   115             11544
103-705.13   10370513        10370513
525-hheef4   525             5254
I posted a question about this yesterday to cover the case when multiple - or . are present, but as seen in the output above that case is actually taken care of now. However, expanding the databases I work with, I encountered much more complex strings such as those presented here.
Does anyone have any idea what to do when letters and digits are "mixed up" in the string?
Regards,
Cenderze
I have seen loads of solutions that use a scalar UDF with a loop, but I don't like either of those things, so I'm throwing my hat into the ring with a different approach.
With the help of a numbers table you can deconstruct each value into its individual characters, remove non-numeric characters, then reconstruct it using FOR XML to concatenate rows, e.g.
WITH Numbers (Number) AS
( SELECT ROW_NUMBER() OVER(ORDER BY N1.N)
FROM (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N1 (N) -- 10
CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N2 (N) -- 100
CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N3 (N) -- 1,000
--CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N4 (N) -- 10,000
--CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N5 (N) -- 100,000
--COMMENT OR UNCOMMENT ROWS AS NECESSARY DEPENDING ON YOUR MAX STRING LENGTH
)
SELECT t.dats,
Stripped = x.data.value('.', 'INT')
FROM @Tabl AS t
CROSS APPLY
( SELECT SUBSTRING(t.dats, n.Number, 1)
FROM Numbers n
WHERE n.Number <= LEN(t.dats)
AND SUBSTRING(t.dats, n.Number, 1) LIKE '[0-9]'
ORDER BY n.Number
FOR XML PATH(''), TYPE
) x (data);
Gives:
dats Stripped
----------------------
103-P705hh 103705
115-xxx-44 11544
103-705.13 10370513
525-hheef4 5254
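A side note on the FOR XML PATH trick used above: the TYPE directive combined with .value() unwraps the XML back to plain text, so special characters are not returned entitized. It makes no difference for digits, but in general:

SELECT (SELECT 'a&b' FOR XML PATH(''));                                 --returns a&amp;b
SELECT (SELECT 'a&b' FOR XML PATH(''), TYPE).value('.', 'varchar(10)'); --returns a&b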
I haven't done any testing, so it could be that the overhead of expanding each string into individual characters and reconstructing it is actually a lot more than that of a UDF with a loop.
I decided to benchmark this.
1. Set up functions
CREATE FUNCTION dbo.ExtractNumeric_TVF (@Input VARCHAR(8000))
RETURNS TABLE
AS
RETURN
( WITH Numbers (Number) AS
( SELECT TOP (LEN(@Input)) ROW_NUMBER() OVER(ORDER BY N1.N)
FROM (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N1 (N) -- 10
CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N2 (N) -- 100
CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N3 (N) -- 1,000
CROSS JOIN (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) AS N4 (N) -- 10,000
)
SELECT Stripped = x.data.value('.', 'VARCHAR(MAX)')
FROM ( SELECT SUBSTRING(@Input, n.Number, 1)
FROM Numbers n
WHERE n.Number <= LEN(@Input)
AND SUBSTRING(@Input, n.Number, 1) LIKE '[0-9]'
ORDER BY n.Number
FOR XML PATH(''), TYPE
) x (data)
);
GO
create function dbo.ExtractNumeric_UDF(@s varchar(8000))
returns varchar(8000)
as
begin
declare @out varchar(max) = ''
declare @c char(1)
while len(@s) > 0 begin
set @c = left(@s,1)
if @c like '[0123456789]' set @out += @c
set @s = substring(@s, 2, len(@s) -1)
end
return @out
end
GO
2. Create first set of sample data and log table
CREATE TABLE dbo.T (Value VARCHAR(8000) NOT NULL);
INSERT dbo.T (Value)
SELECT TOP 1000 LEFT(NEWID(), CEILING(RAND(CHECKSUM(NEWID())) * 36))
FROM sys.all_objects a
CROSS JOIN sys.all_objects b;
CREATE TABLE dbo.TestLog (Fx VARCHAR(255), NumberOfRows INT, TimeStart DATETIME2(7), TimeEnd DATETIME2(7))
3. Run Tests
GO
DECLARE @T TABLE (Val VARCHAR(8000));
INSERT dbo.TestLog (fx, NumberOfRows, TimeStart)
VALUES ('dbo.ExtractNumeric_UDF', 1000, SYSDATETIME());
INSERT @T (Val)
SELECT dbo.ExtractNumeric_UDF(Value)
FROM dbo.T;
UPDATE dbo.TestLog
SET TimeEnd = SYSDATETIME()
WHERE TimeEnd IS NULL;
GO 100
DECLARE @T TABLE (Val VARCHAR(8000));
INSERT dbo.TestLog (fx, NumberOfRows, TimeStart)
VALUES ('dbo.ExtractNumeric_TVF', 1000, SYSDATETIME());
INSERT @T (Val)
SELECT f.Stripped
FROM dbo.T
CROSS APPLY dbo.ExtractNumeric_TVF(Value) f;
UPDATE dbo.TestLog
SET TimeEnd = SYSDATETIME()
WHERE TimeEnd IS NULL;
GO 100
4. Get Results
SELECT Fx,
NumberOfRows,
RunTime = AVG(DATEDIFF(MILLISECOND, TimeStart, TimeEnd))
FROM dbo.TestLog
GROUP BY fx, NumberOfRows;
I ran the above (using just NEWID(), so a maximum of 36 characters per string) over 1,000 and 10,000 rows; the results were:
Fx NumberOfRows RunTime
--------------------------------------------------------
dbo.ExtractNumeric_TVF 1000 31
dbo.ExtractNumeric_UDF 1000 56
dbo.ExtractNumeric_TVF 10000 280
dbo.ExtractNumeric_UDF 10000 510
So the TVF comes in at roughly half the time of the UDF.
I wanted to test edge cases, so I put in 1,000 rows of longer strings (5,400 characters each):
TRUNCATE TABLE dbo.T;
INSERT dbo.T (Value)
SELECT TOP 1000
REPLICATE(CONCAT(NEWID(), NEWID(), NEWID(), NEWID(), NEWID()), 30)
FROM sys.all_objects a
CROSS JOIN sys.all_objects b;
And this is where the TVF came into its own, running over 5x faster:
Fx NumberOfRows RunTime
------------------------------------------------
dbo.ExtractNumeric_TVF 1000 2485
dbo.ExtractNumeric_UDF 1000 12955
I also really don't like the looping solutions, so I decided to try my hand at a set-based one. This uses a predefined tally table and is quite similar to others posted here already.
This is my tally table. I keep this as a view on my system.
create View [dbo].[cteTally] as
WITH
E1(N) AS (select 1 from (values (1),(1),(1),(1),(1),(1),(1),(1),(1),(1))dt(n)),
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS
(
SELECT ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
)
select N from cteTally
GO
Because I don't like looping, I decided to use the table-valued-function approach, which lets me reuse this functionality in other queries with little to no effort. Here is one way to write such a function.
create function GetOnlyNumbers
(
@SearchVal varchar(8000)
) returns table as return
with MyValues as
(
select substring(@SearchVal, N, 1) as number
, t.N
from cteTally t
where N <= len(@SearchVal)
and substring(@SearchVal, N, 1) like '[0-9]'
)
select distinct NumValue = STUFF((select number + ''
from MyValues mv2
order by mv2.N
for xml path('')), 1, 0, '')
from MyValues mv
That looks good but the proof is in the pudding. Let's take this out with our sample data and kick the tires a few times.
DECLARE @Tabl as table
(
dats nvarchar(15)
)
INSERT INTO @Tabl VALUES
('103-P705hh'),
('115-xxx-44'),
('103-705.13'),
('525-hheef4')
select *
from @Tabl t
cross apply dbo.GetOnlyNumbers(t.dats) x
Sure looks nice and tidy. I tested against several of the other solutions posted here, and without going into deep testing this appears to be significantly faster than the other approaches posted at this time.
DECLARE @Tabl as table
(
ID INT,
dats nvarchar(15)
)
INSERT INTO @Tabl VALUES
(1, '103-P705hh'),
(2, '115-xxx-44'),
(3, '103-705.13'),
(4, '525-hheef4')
-- uses master.dbo.spt_values (type 'P') as a ready-made numbers table
SELECT T.ID, T.dats
,(
SELECT SUBSTRING(TT.dats,V.number,1)
FROM @Tabl TT
JOIN master.dbo.spt_values V ON V.type='P' AND V.number BETWEEN 1 AND LEN(TT.dats)
WHERE TT.ID=T.ID AND SUBSTRING(TT.dats,V.number,1) LIKE '[0-9]'
ORDER BY V.number
FOR XML PATH('')
) S
FROM @Tabl T
ORDER BY T.ID;
Can you use a UDF? If so, try this:
create or alter function numerals(@s varchar(max))
returns varchar(max)
as
begin
declare @out varchar(max) = ''
declare @c char(1)
while len(@s) > 0 begin
set @c = left(@s,1)
if @c like '[0123456789]' set @out += @c
set @s = substring(@s, 2, len(@s) -1)
end
return @out
end
To use it on your table variable...
select dbo.numerals(dats) from @Tabl
Another solution, which does not use a UDF but works only if your table has a primary key, uses a recursive CTE:
DECLARE @Tabl as table
(pk int identity not null, -- <=== added a primary key
dats nvarchar(max) )
INSERT INTO @Tabl VALUES
('103-P705hh'),
('115-xxx-44'),
('103-705.13'),
('525-hheef4');
with newVals(pk, pos, newD) as
(select pk, 1,
case when left(Dats,1) like '[0123456789]'
then left(Dats,1) else '' end
from @Tabl
Union All
Select t.pk, pos + 1, n.newD +
case when substring(dats, pos+1, 1) like '[0123456789]'
then substring(dats, pos+1, 1) else '' end
from @Tabl t join newVals n on n.pk = t.pk
where pos+1 <= len(dats) )
Select newD from newVals x
where pos = (Select Max(pos)
from newVals
where pk = x.pk)
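One caveat with the recursive CTE: SQL Server stops recursion at 100 levels by default, so strings longer than about 100 characters would raise an error here. You can lift the cap by adding a query hint to the final SELECT:

Select newD from newVals x
where pos = (Select Max(pos)
from newVals
where pk = x.pk)
option (maxrecursion 0)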
I ran this query:
select * From dbo.fn_split('Age,15,14,193,188 ',',')
It's returning the values, but cutting off one character from the front of each value.
I tried adding a space after every comma, like
select * From dbo.fn_split('Age, 15, 14, 193, 188 ',',')
and it worked. But I want to know why it is not working with commas alone:
select * From dbo.fn_split('Age,15,14,193,188 ',',')
You're not posting the code of fn_split, which is where the problem resides. But if your strings are less than 8000 characters long, this function will help you split them in an optimal way. It is a modified version of Jeff Moden's splitter, made by Eirikur Eiriksson.
CREATE FUNCTION [dbo].[DelimitedSplit8K_LEAD]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE Driven "Tally Table” produces values from 0 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "zero base" and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT 0 UNION ALL
SELECT TOP (DATALENGTH(ISNULL(@pString,1))) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT t.N+1
FROM cteTally t
WHERE (SUBSTRING(@pString,t.N,1) = @pDelimiter OR t.N = 0)
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY s.N1),
Item = SUBSTRING(@pString,s.N1,ISNULL(NULLIF((LEAD(s.N1,1,1) OVER (ORDER BY s.N1) - 1),0)-s.N1,8000))
FROM cteStart s
;
GO
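A quick sanity check against the string from the question (ItemNumber and Item are the two columns the splitter returns):

SELECT ItemNumber, Item
FROM dbo.DelimitedSplit8K_LEAD('Age,15,14,193,188', ',');
--ItemNumber  Item
--1           Age
--2           15
--3           14
--4           193
--5           188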
ALTER FUNCTION [dbo].[fn_Split](@text varchar(8000), @delimiter varchar(20) = ',')
RETURNS @Strings TABLE
(
position int IDENTITY PRIMARY KEY,
value varchar(8000)
)
AS
BEGIN
DECLARE @index int
SET @index = -1
WHILE (LEN(@text) > 0)
BEGIN
SET @index = CHARINDEX(@delimiter , @text)
IF (@index = 0) AND (LEN(@text) > 0)
BEGIN
INSERT INTO @Strings VALUES (@text)
BREAK
END
IF (@index > 1)
BEGIN
INSERT INTO @Strings VALUES (LEFT(@text, @index - 1))
SET @text = RIGHT(@text, (LEN(@text) - @index))
END
ELSE
SET @text = RIGHT(@text, (LEN(@text) - @index))
END
END
RETURN
END
I figured out what I was doing wrong. Basically, I was adding an extra space at the end of the string that I want to split. If you change
Select * From dbo.fn_split('Age,15,14,193,188 ',',') to Select * From dbo.fn_split('Age,15,14,193,188',','). In other words, get rid of the extra space after number 188 and you will get the desired result.
First post - I am trying to pull ten different pieces of information from a single field. Let me start by saying this is not my table, just what I was given to work with. It is a varchar(max) field.
'3350|#|1234567|~|3351|#|8/1/2017|~|3352|#|Acme|~|3353|~|10000.00|~|3354|#||~|3355|#||~3356|#|Yes|~|3357|#|Doe,John|~|3358|#|CA|~|3359|#|5551212'
I know that the numbers that start with 33 are keys telling me what information is in that section. 3350 has the invoice #1234567. 3351 has the invoice date of 8/1/2017, etc. 3354 and 3355 were left null. The keys are unchanging and will be the same for every record in the table.
I need to pull the data from between 3350|#| and |~|3351 to get my invoice #, and between 3351|#| and |~|3352 to get my date, etc., but I am struggling with how to word this. Any help would be appreciated, and any critiques on my first post will be taken constructively.
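For a single known key, a plain CHARINDEX/SUBSTRING expression is enough; a minimal sketch for the invoice number, assuming the fixed 3350|#| and |~|3351 markers described above (the @s value is shortened for readability):

DECLARE @s varchar(max) = '3350|#|1234567|~|3351|#|8/1/2017|~|3352|#|Acme';
SELECT SUBSTRING(@s,
CHARINDEX('3350|#|', @s) + 7,
CHARINDEX('|~|3351', @s) - (CHARINDEX('3350|#|', @s) + 7)) AS InvoiceNo; --1234567

The answers below generalize this to all ten keys at once.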
@YourTable is just a table variable used for demonstration/illustration.
For Rows - Example
Declare @YourTable table (ID int,SomeCol varchar(max))
Insert Into @YourTable values
(1,'3350|#|1234567|~|3351|#|8/1/2017|~|3352|#|Acme|~|3353|#|10000.00|~|3354|#||~|3355|#||~|3356|#|Yes|~|3357|#|Doe,John|~|3358|#|CA|~|3359|#|5551212')
Select A.ID
,Item = left(RetVal,charindex('|#|',RetVal+'|#|')-1)
,Value = right(RetVal,len(RetVal)-charindex('|#|',RetVal+'|#|')-2)
From @YourTable A
Cross Apply [dbo].[udf-Str-Parse](A.SomeCol,'|~|') B
Returns:

ID  Item  Value
--  ----  --------
1   3350  1234567
1   3351  8/1/2017
1   3352  Acme
1   3353  10000.00
1   3354
1   3355
1   3356  Yes
1   3357  Doe,John
1   3358  CA
1   3359  5551212
For Columns - Example
Declare @YourTable table (ID int,SomeCol varchar(max))
Insert Into @YourTable values
(1,'3350|#|1234567|~|3351|#|8/1/2017|~|3352|#|Acme|~|3353|#|10000.00|~|3354|#||~|3355|#||~|3356|#|Yes|~|3357|#|Doe,John|~|3358|#|CA|~|3359|#|5551212')
Select *
From (
Select A.ID
,Item = left(RetVal,charindex('|#|',RetVal+'|#|')-1)
,Value = right(RetVal,len(RetVal)-charindex('|#|',RetVal+'|#|')-2)
From @YourTable A
Cross Apply [dbo].[udf-Str-Parse](A.SomeCol,'|~|') B
) A
Pivot (max([Value]) For [Item] in ([3350],[3351],[3352],[3353],[3354],[3355],[3356],[3357],[3358],[3359]) ) p
Returns the same key/value pairs pivoted into a single row per ID, with one column for each key 3350 through 3359.
The UDF, if interested:
CREATE FUNCTION [dbo].[udf-Str-Parse] (@String varchar(max),@Delimiter varchar(10))
Returns Table
As
Return (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(@String,@Delimiter,'§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as A
Cross Apply x.nodes('x') AS B(i)
);
--Thanks Shnugo for making this XML safe
--Select * from [dbo].[udf-Str-Parse]('Dog,Cat,House,Car',',')
--Select * from [dbo].[udf-Str-Parse]('John Cappelletti was here',' ')
You can try a tally-based splitter like the one below:
declare @t table ( id int, col nvarchar(max));
insert into @t values
(1, '3350|#|1234567|~|3351|#|8/1/2017|~|3352|#|Acme|~|3353|~|10000.00|~|3354|#||~|3355|#||~3356|#|Yes|~|3357|#|Doe,John|~|3358|#|CA|~|3359|#|5551212')
,(2, '3350|#|123334567|~|3351|#|8/2/2017|~|3352|#|Acme|~|3353|~|10000.00|~|3354|#||~|3355|#||~3356|#|Yes|~|3357|#|Doe,John|~|3358|#|CA|~|3359|#|5551212');
select
id,
case
when split_values like '3350|#|%' then 'id'
when split_values like '3351|#|%' then 'date'
end as fieldname,
SUBSTRING(split_values,8,LEN(split_values)-7) as value
from
(
select
--t.col as col,
row_number() over (partition by t.col order by t1.N asc) as row_num,
t.id,
SUBSTRING( t.col, t1.N, ISNULL(NULLIF(CHARINDEX('|~|',t.col,t1.N),0)-t1.N,8000)) as split_values
from @t t
join
(
select
t.col,
1 as N
from @t t
UNION ALL
select
t.col,
t1.N + 3 as N
from @t t
join
(
select
top 8000
row_number() over(order by (select NULL)) as N
from
sys.objects s1
cross join
sys.objects s2
) t1
on SUBSTRING(t.col,t1.N,3) = '|~|'
) t1
on t1.col=t.col
)a
where
split_values like '3350|#|%' or
split_values like '3351|#|%'
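With the two sample rows above, this should return something like:

id  fieldname  value
--  ---------  ---------
1   id         1234567
1   date       8/1/2017
2   id         123334567
2   date       8/2/2017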
I would like to improve the running time of some T-SQL code.
The INSERT query is taking a long time. Is there any chance to use a different method?
Here is my code:
Create PROCEDURE [dbo].[CreateTableDemog]
@Age int,
@RetiredAge int
as
begin
declare @t as int = 1;
declare @tlast as int = @RetiredAge - @Age;
declare @Period as int = 0;
while @t <= @tlast
begin
while @Period <= @t - 1
begin
INSERT INTO dbo.Table (t, Age, Period, Prob)
VALUES (@t, @Age + @t, @Period, 1);
set @Period = @Period + 1
end
set @Period = 0
set @t = @t + 1
end
end
Here is how you could do this using a tally table instead of the nested while loops. This is a strange requirement but easy enough to follow.
This article by Jeff Moden does a great job of explaining what a tally is and how you can use it. http://www.sqlservercentral.com/articles/T-SQL/62867/
Create PROCEDURE [dbo].[CreateTableDemog]
(
@Age int
, @RetiredAge int
) as
set nocount on;
WITH
E1(N) AS (select 1 from (values (1),(1),(1),(1),(1),(1),(1),(1),(1),(1))dt(n)),
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS
(
SELECT ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
)
INSERT INTO dbo.Table
(
t
, Age
, Period
, Prob
)
select t1.N as T
, @Age + t1.N as AgePlusT
, abs(t2.N - t1.N) as Period
, 1
from cteTally t1
join cteTally t2 on t2.N <= t1.N
where t1.N <= @RetiredAge - @Age
order by t1.N
, @Age + t1.N
, abs(t2.N - t1.N);
GO
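Usage stays the same as the original procedure, e.g. (assuming the dbo.Table target from the question exists):

EXEC dbo.CreateTableDemog @Age = 60, @RetiredAge = 65;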
I am trying to understand this query from a Stack Overflow thread:
--create test table
CREATE TABLE dbo.TestTable(
Col1 nchar(4000) NOT NULL
, Col2 nvarchar(MAX) NOT NULL
);
--load 100,000 rows (about 2.8GB)
WITH
t4 AS (SELECT n FROM (VALUES(0),(0),(0),(0)) t(n))
,t256 AS (SELECT 0 AS n FROM t4 AS a CROSS JOIN t4 AS b CROSS JOIN t4 AS c CROSS JOIN t4 AS d)
,t16M AS (SELECT ROW_NUMBER() OVER (ORDER BY (a.n)) AS num FROM t256 AS a CROSS JOIN t256 AS b CROSS JOIN t256 AS c)
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
SELECT REPLICATE(N'X', 4000), REPLICATE(CAST('X' AS nvarchar(MAX)), 10000)
FROM t16M
WHERE num <= 100000;
GO
--run query in loop (expect parallel execution plan with many read-ahead and LOB page reads)
SET NOCOUNT ON;
DECLARE @RowCount int, @Iteration int = 1;
WHILE @Iteration <= 100
BEGIN
CHECKPOINT;
DBCC DROPCLEANBUFFERS WITH NO_INFOMSGS;
SELECT @RowCount = COUNT(*) FROM dbo.TestTable WHERE Col2 LIKE 'X%';
RAISERROR('Iteration %d completed',0,1,@Iteration) WITH NOWAIT; --display progress message
SET @Iteration += 1;
END;
GO
I especially couldn't understand this portion of the T-SQL query:
--load 100,000 rows (about 2.8GB)
WITH
t4 AS (SELECT n FROM (VALUES(0),(0),(0),(0)) t(n))
,t256 AS (SELECT 0 AS n FROM t4 AS a CROSS JOIN t4 AS b CROSS JOIN t4 AS c CROSS JOIN t4 AS d)
,t16M AS (SELECT ROW_NUMBER() OVER (ORDER BY (a.n)) AS num FROM t256 AS a CROSS JOIN t256 AS b CROSS JOIN t256 AS c)
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
SELECT REPLICATE(N'X', 4000), REPLICATE(CAST('X' AS nvarchar(MAX)), 10000)
FROM t16M
WHERE num <= 100000;
GO
Why do we have a "WITH" below "--load 100,000 rows..."? What does that "WITH" do? Is it part of the CREATE statement?
Regarding this insert statement:
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
SELECT REPLICATE(N'X', 4000), REPLICATE(CAST('X' AS nvarchar(MAX)), 10000)
FROM t16M
WHERE num <= 100000;
Wouldn't just
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
FROM t16M
WHERE num <= 100000;
do? Wouldn't the above take the first 100,000 rows from t16M and insert them into dbo.TestTable? Why are we doing that "SELECT..."? What is the implication of that?
I understand that REPLICATE(N'X', 4000) would put X 4000 times in Col1, and REPLICATE(CAST('X' AS nvarchar(MAX)), 10000) would put X 10000 times in Col2. If we are doing this, then why select from t16M? Or if we are selecting from t16M, then why do this REPLICATE thing?
What does
@RowCount = COUNT(*)
do? Assign the number of rows in the table to a variable called 'RowCount'? Is it the same as @@ROWCOUNT? I don't think it can be.
I'll reformat the code for better understanding:
WITH t4 AS(
SELECT n FROM (VALUES(0),(0),(0),(0)) t(n)
)
,t256 AS(
SELECT 0 AS n
FROM t4 AS a
CROSS JOIN t4 AS b
CROSS JOIN t4 AS c
CROSS JOIN t4 AS d
)
,t16M AS(
SELECT ROW_NUMBER() OVER (ORDER BY (a.n)) AS num
FROM t256 AS a
CROSS JOIN t256 AS b
CROSS JOIN t256 AS c
)
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
SELECT REPLICATE(N'X', 4000), REPLICATE(CAST('X' AS nvarchar(MAX)), 10000)
FROM t16M
WHERE num <= 100000;
The 3 cascading CTEs make up a Tally Table, also called a Numbers Table. It is composed of sequential numbers from 1 up to some maximum.
This one generates 4 rows with 0 value:
WITH t4 AS(
SELECT n FROM (VALUES(0),(0),(0),(0)) t(n)
)
Then four copies of it are CROSS JOINed, generating 4 * 4 * 4 * 4 = 4^4 = 256 rows, thus the alias t256. Again, three copies of t256 are CROSS JOINed, producing 256^3 = 16,777,216 rows, thus the alias t16M.
If you select from t16M, you can verify that it returns over 16M rows.
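For example, you can check the row count by reusing the same CTEs:

WITH
t4 AS (SELECT n FROM (VALUES(0),(0),(0),(0)) t(n))
,t256 AS (SELECT 0 AS n FROM t4 AS a CROSS JOIN t4 AS b CROSS JOIN t4 AS c CROSS JOIN t4 AS d)
,t16M AS (SELECT ROW_NUMBER() OVER (ORDER BY (a.n)) AS num FROM t256 AS a CROSS JOIN t256 AS b CROSS JOIN t256 AS c)
SELECT COUNT(*) AS TotalRows FROM t16M; --16,777,216 = 256 * 256 * 256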
This is then used to insert 100,000 rows into TestTable, as evidenced by the WHERE clause:
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
SELECT REPLICATE(N'X', 4000), REPLICATE(CAST('X' AS nvarchar(MAX)), 10000)
FROM t16M
WHERE num <= 100000
Some may use a WHILE loop in an attempt to do this, that is, to insert 100,000 rows into a table. The Tally Table is a great way to do this in a set-based fashion. For more info, read this: http://www.sqlservercentral.com/articles/T-SQL/62867/
Question #2
The WITH keyword below the CREATE statement marks the declaration of a Common Table Expression. It is not part of the CREATE statement itself; it introduces the CTEs used by the INSERT ... SELECT that follows.
Question #3
I believe the query below will produce a syntax error.
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
FROM t16M
WHERE num <= 100000;
This, on the other hand, will not.
INSERT INTO dbo.TestTable WITH(TABLOCKX) (Col1, Col2)
SELECT REPLICATE(N'X', 4000), REPLICATE(CAST('X' AS nvarchar(MAX)), 10000)
FROM t16M
WHERE num <= 100000;
What this query does is INSERT 100,000 rows, composed of 2 columns whose values are continuous strings of 'X's. Remember that t16M is our Tally Table, which consists of a sequence of numbers from 1 up to 16M+. We do not use the values of the Tally Table for the insert; we only use the presence of its rows to limit the number of inserts.
Question #4 You're right when you said @RowCount = COUNT(*) assigns the number of rows to the variable.
SELECT @RowCount = COUNT(*) FROM dbo.TestTable WHERE Col2 LIKE 'X%';
SELECT @@ROWCOUNT
However, the above statements are not the same. @@ROWCOUNT returns the number of rows affected by the last statement. If we put it inside the WHILE loop right after the SELECT @RowCount, it will return 1, as only 1 row is affected. However, if we put it directly after the INSERT statement, it will return the same as SELECT COUNT(*) FROM dbo.TestTable.
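A small sketch of the difference, assuming dbo.TestTable has been loaded as above:

DECLARE @RowCount int;
SELECT @RowCount = COUNT(*) FROM dbo.TestTable WHERE Col2 LIKE 'X%';
SELECT @@ROWCOUNT AS RowsAffected; --returns 1: the assignment SELECT produced a single row

Placed directly after the INSERT ... FROM t16M instead, SELECT @@ROWCOUNT would report 100000.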
I have written the following function that takes in two comma-separated strings, splits them into two table variables, and then uses those tables to find what percentage of words match between the two. The problem is that when I use it per row on a data set of about 200k rows, the query times out!
Are there any optimizations that you can see that can be done?
ALTER FUNCTION [GetWordSimilarity](@String varchar(8000),
@String2 varchar(8000),@Delimiter char(1))
returns decimal(16,2)
as
begin
declare @result as decimal (16,2)
declare @temptable table (items varchar(8000))
declare @temptable2 table (items varchar(8000))
declare @numberOfCommonWords decimal(16,2)
declare @countTable1 decimal(16,2)
declare @countTable2 decimal(16,2)
declare @denominator decimal(16,2)
set @result = 0.0 --dummy value
declare @idx int
declare @slice varchar(8000)
select @idx = 1
if len(@String)<1 or @String is null or len(@String2) = 0 or @String2 is null return 0.0
--populating @temptable
while @idx!= 0
begin
set @idx = charindex(@Delimiter,@String)
if @idx!=0
set @slice = left(@String,@idx - 1)
else
set @slice = @String
if(len(@slice)>0)
insert into @temptable(Items) values(ltrim(rtrim(@slice)))
set @String = right(@String,len(@String) - @idx)
if len(@String) = 0 break
end
select @idx = 1
----populating @temptable2
while @idx!= 0
begin
set @idx = charindex(@Delimiter,@String2)
if @idx!=0
set @slice = left(@String2,@idx - 1)
else
set @slice = @String2
if(len(@slice)>0)
insert into @temptable2(Items) values(ltrim(rtrim(@slice)))
set @String2 = right(@String2,len(@String2) - @idx)
if len(@String2) = 0 break
end
--calculating percentage of words match
if (((select COUNT(*) from @temptable) = 0) or ((select COUNT(*) from @temptable2) = 0))
return 0.0
select @numberOfCommonWords = COUNT(*) from
(
select distinct items from @temptable
intersect
select distinct items from @temptable2
) a
select @countTable1 = COUNT (*) from @temptable
select @countTable2 = COUNT (*) from @temptable2
if(@countTable1 > @countTable2) set @denominator = @countTable1
else set @denominator = @countTable2
set @result = @numberOfCommonWords/@denominator
return @result
end
Thanks a bunch!
There is no way to write a T-SQL UDF with heavy string manipulation inside that will behave OK on a large number of rows. You will get some gain if you use a Numbers table, though:
declare
@col_list varchar(1000),
@sep char(1)
set @col_list = 'TransactionID, ProductID, ReferenceOrderID, ReferenceOrderLineID, TransactionDate, TransactionType, Quantity, ActualCost, ModifiedDate'
set @sep = ','
select substring(@col_list, n, charindex(@sep, @col_list + @sep, n) - n)
from numbers where substring(@sep + @col_list, n, 1) = @sep
and n < len(@col_list) + 1
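If you don't already have a Numbers table, a common one-off build looks like this (the dbo.Numbers name and the 8,000 cap are illustrative):

SELECT TOP (8000)
n = ROW_NUMBER() OVER (ORDER BY (SELECT NULL))
INTO dbo.Numbers
FROM sys.all_objects a
CROSS JOIN sys.all_objects b;
CREATE UNIQUE CLUSTERED INDEX IX_Numbers_n ON dbo.Numbers (n);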
Your best course of action would be to write the whole thing in SQLCLR.
The problem of course is with the design. You shouldn't be storing comma-separated data in a SQL database to start with.
But, I guess we're stuck with it for now.
One thing to consider is converting the function to SQLCLR; SQL by itself is not very good with string operations. (Well, in fact, no language is good with string operations IMHO, but SQL is really bad at it =)
The splitter you use to fill the two table variables can be optimized by using the code from Jeff Moden, who wrote several fantastic articles, the latest of which can be found here: http://www.sqlservercentral.com/articles/Tally+Table/72993/
Taking his splitter and optimizing the rest of the code a bit, I managed to get from 771 seconds down to 305 seconds on a 200K-row random data sample.
Some things to note: the results aren't quite the same. I checked some manually, and I actually think the new results are more accurate, but I don't really have time to go bug hunting on both versions.
I tried to convert this to a more set-based approach where I first load all the words for all row_ids into one table and then join them back together. Although the joining is quite fast, it simply takes too long to create the initial tables, so it even loses out to the original function.
Maybe I'll try to figure out another way to make it faster, but for now I hope this helps you out a bit.
ALTER FUNCTION [GetWordSimilarity2](@String1 varchar(8000),
@String2 varchar(8000),@Delimiter char(1))
returns decimal(16,2)
as
begin
declare @temptable1 table (items varchar(8000), row_id int IDENTITY(1, 1), PRIMARY KEY (items, row_id))
declare @temptable2 table (items varchar(8000), row_id int IDENTITY(1, 1), PRIMARY KEY (items, row_id))
declare @numberOfCommonWords decimal(16,2)
declare @countTable1 decimal(16,2)
declare @countTable2 decimal(16,2)
-- based on code from Jeff Moden (http://www.sqlservercentral.com/articles/Tally+Table/72993/)
--populating @temptable1
;WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT TOP (ISNULL(DATALENGTH(@String1),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@String1,t.N,1) = @Delimiter
),
cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@Delimiter,@String1,s.N1),0)-s.N1,8000)
FROM cteStart s
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
INSERT @temptable1 (items)
SELECT Item = SUBSTRING(@String1, l.N1, l.L1)
FROM cteLen l
SELECT @countTable1 = @@ROWCOUNT
----populating @temptable2
;WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT TOP (ISNULL(DATALENGTH(@String2),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@String2,t.N,1) = @Delimiter
),
cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@Delimiter,@String2,s.N1),0)-s.N1,8000)
FROM cteStart s
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
INSERT @temptable2 (items)
SELECT Item = SUBSTRING(@String2, l.N1, l.L1)
FROM cteLen l
SELECT @countTable2 = @@ROWCOUNT
--calculating percentage of words match
if @countTable1 = 0 OR @countTable2 = 0
return 0.0
select @numberOfCommonWords = COUNT(DISTINCT t1.items)
from @temptable1 t1
JOIN @temptable2 t2
ON t1.items = t2.items
RETURN @numberOfCommonWords / (CASE WHEN (@countTable1 > @countTable2) THEN @countTable1 ELSE @countTable2 END)
end
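Usage is identical to the original function, e.g.:

SELECT dbo.GetWordSimilarity2('apple,banana,pear', 'banana,pear,kiwi', ','); --returns 0.67 (2 matching words out of 3)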