Related
I have the following code that loops through a table of unique model numbers and creates a new table that contains, for each model number, one row per year and week number. How can I rewrite this so it doesn't use a cursor?
DECLARE @current_model varchar(50);
-- Declare a cursor that iterates through model numbers in the ItemInformation table.
DECLARE model_cursor CURSOR FOR
SELECT model FROM ItemInformation
-- Start the cursor.
OPEN model_cursor
-- Get the next (first) value.
FETCH NEXT FROM model_cursor INTO @current_model;
DECLARE @year_counter SMALLINT;
DECLARE @week_counter TINYINT;
-- @@FETCH_STATUS = 0 means the last FETCH succeeded and a row is available.
WHILE (@@FETCH_STATUS = 0)
BEGIN
    SET @year_counter = 2019;
    -- Years 2019 through (current year + 2).
    WHILE (@year_counter <= DATEPART(year, GETDATE() - 1) + 2)
    BEGIN
        SET @week_counter = 1;
        -- NOTE(review): always emits 52 weeks, but the expected output shows 53
        -- weeks for years such as 2020 — confirm which behavior is wanted.
        WHILE (@week_counter <= 52)
        BEGIN
            INSERT INTO dbo.ModelCalendar (
                model,
                sales_year,
                sales_week
            )
            VALUES (
                @current_model,
                @year_counter,
                @week_counter
            )
            SET @week_counter = @week_counter + 1
        END
        SET @year_counter = @year_counter + 1
    END
    FETCH NEXT FROM model_cursor INTO @current_model
END;
CLOSE model_cursor;
DEALLOCATE model_cursor;
If ItemInformation contains the following table:
model,invoice
a,4.99
b,9.99
c,1.99
d,8.99
then the expected output is:
model,sales_year,sales_week
A,2019,1
A,2019,2
A,2019,3
...
A,2019,52
A,2020,1
A,2020,2
A,2020,3
...
A,2020,51
A,2020,52
A,2020,53 (this is 53 because 2020 is leap year and has 53 weeks)
A,2021,1
A,2021,2
...
A,2022,1
A,2022,2
...
A,2022,52
B,2019,1
B,2019,2
...
D, 2022,52
Using CTE's you can get all combinations of weeks and years within the range required. Then join your data table on.
-- Sample data: a table variable standing in for ItemInformation.
declare @Test table (model varchar(1), invoice varchar(4));
insert into @Test (model, invoice)
values
('a', '4.99'),
('b', '9.99'),
('c', '1.99'),
('d', '8.99');

-- Recursive CTEs generate all week numbers (1..53) and years (2019..current+1);
-- weeks are then filtered per year down to its real last week (52 or 53).
with Week_CTE as (
    select 1 as WeekNo
    union all
    select 1 + WeekNo
    from Week_CTE
    where WeekNo < 53
), Year_CTE as (
    select 2019 as YearNo
    union all
    select 1 + YearNo
    from Year_CTE
    where YearNo <= datepart(year, current_timestamp)
)
select T.model, yr.YearNo, wk.WeekNo
from Week_CTE wk
cross join (
    select YearNo
    -- Find the last week of the year (52 or 53) -- might need to change the
    -- start day of the week (DATEFIRST) for this to be correct.
    , datepart(week, dateadd(day, -1, dateadd(year, 1, '01 Jan ' + convert(varchar(4), YearNo)))) as LastWeek
    from Year_CTE yr
) yr
cross join (
    -- Assuming only a single row per model is required; the invoice column is ignored.
    select model
    from @Test
    group by model
) T
where wk.WeekNo <= yr.LastWeek
order by yr.YearNo, wk.WeekNo;
As you have advised that using a recursive CTE is not an option, you can try using a CTE without recursion:
-- Non-recursive variant: a small row source (T x T = 64 rows) drives
-- ROW_NUMBER() to generate week and year numbers without recursion.
with T(N) as (
    select X.N
    from (values (0),(0),(0),(0),(0),(0),(0),(0)) X(N)
), W(N) as (
    -- Week numbers 1..53.
    select top (53) row_number() over (order by @@version) as N
    from T T1
    cross join T T2
), Y(N) as (
    -- Upper limit on number of years.
    select top (12) 2018 + row_number() over (order by @@version) as N
    from T T1
    cross join T T2
)
select W.N as WeekNo, Y.N as YearNo, T.model
from W
cross join (
    select N
    -- Find the last week of the year (52 or 53) -- might need to change the
    -- start day of the week for this to be correct.
    , datepart(week, dateadd(day, -1, dateadd(year, 1, '01 Jan ' + convert(varchar(4), N)))) as LastWeek
    from Y
) Y
cross join (
    -- Assuming only a single row per model is required; the invoice column is ignored.
    select model
    from @Test
    group by model
) T
-- Filter to the required number of years.
where Y.N <= datepart(year, current_timestamp) + 1
and W.N <= Y.LastWeek
order by Y.N, W.N, T.model;
Note: If you setup your sample data in future with the DDL/DML as shown here you will greatly assist people attempting to answer.
I don't like to see a loop solution where a set solution can be found. So here goes Take II with no CTE, no values and no row_number() (the table variable is just to simulate your data so not part of the actual solution):
-- Sample data only (not part of the actual solution).
declare @Test table (model varchar(1), invoice varchar(4));
insert into @Test (model, invoice)
values
('a', '4.99'),
('b', '9.99'),
('c', '1.99'),
('d', '8.99');

select Y.N + 2019 as YearNumber, W.WeekNumber, T.Model
from (
    -- Cross join 6 * 10 digit rows (candidate weeks 1..60), then filter
    -- to 52/53 as required.
    select W1.N * 10 + W2.N + 1 as WeekNumber
    from (
        select 0 N
        union all select 1
        union all select 2
        union all select 3
        union all select 4
        union all select 5
    ) W1
    cross join (
        select 0 N
        union all select 1
        union all select 2
        union all select 3
        union all select 4
        union all select 5
        union all select 6
        union all select 7
        union all select 8
        union all select 9
    ) W2
) W
-- Cross join number of years required; just ensure it's more than will ever
-- be needed, then filter back.
cross join (
    select 0 N
    union all select 1
    union all select 2
    union all select 3
    union all select 4
    union all select 5
    union all select 6
    union all select 7
    union all select 8
    union all select 9
) Y
cross join (
    -- Assuming only a single row per model is required; the invoice column is ignored.
    select model
    from @Test
    group by model
) T
-- Some filter to restrict the years.
where Y.N <= 3
-- Some filter to restrict the weeks.
and W.WeekNumber <= 53
order by YearNumber, WeekNumber;
I created a temporary calendar table containing all the weeks and years. To account for leap years, I took the last 7 days of each year and got the ISO week number for each day. To find how many weeks are in a year, I put these values into another temp table and took the maximum. Azure Synapse doesn't support multiple rows in one INSERT, so the code looks a lot longer than it should be. I also had to declare each value as a variable, since Synapse can only insert literals or variables. I then cross-joined the result with my ItemInformation table.
-- Temp calendar table: one row per (year, week).
CREATE TABLE #temp_dates
(
    year SMALLINT,
    week TINYINT
);
-- Scratch table holding the ISO week number of each of the last 7 days of a
-- year; MAX(week_num) tells us whether the year has 52 or 53 ISO weeks.
CREATE TABLE #temp_weeks
(
    week_num TINYINT
);
DECLARE @year_counter SMALLINT
SET @year_counter = 2019
DECLARE @week_counter TINYINT
WHILE (@year_counter <= DATEPART(year, GETDATE() - 1) + 2)
BEGIN
    SET @week_counter = 1;
    -- Azure Synapse only allows a single row of literals/variables per
    -- INSERT ... VALUES, hence one variable and one INSERT per day below.
    DECLARE @day_1 TINYINT
    SET @day_1 = DATEPART(isowk, CONCAT('12-25-', @year_counter))
    DECLARE @day_2 TINYINT
    SET @day_2 = DATEPART(isowk, CONCAT('12-26-', @year_counter))
    DECLARE @day_3 TINYINT
    SET @day_3 = DATEPART(isowk, CONCAT('12-27-', @year_counter))
    DECLARE @day_4 TINYINT
    SET @day_4 = DATEPART(isowk, CONCAT('12-28-', @year_counter))
    DECLARE @day_5 TINYINT
    SET @day_5 = DATEPART(isowk, CONCAT('12-29-', @year_counter))
    DECLARE @day_6 TINYINT
    SET @day_6 = DATEPART(isowk, CONCAT('12-30-', @year_counter))
    DECLARE @day_7 TINYINT
    SET @day_7 = DATEPART(isowk, CONCAT('12-31-', @year_counter))
    TRUNCATE TABLE #temp_weeks
    INSERT INTO #temp_weeks (week_num) VALUES (@day_1)
    INSERT INTO #temp_weeks (week_num) VALUES (@day_2)
    INSERT INTO #temp_weeks (week_num) VALUES (@day_3)
    INSERT INTO #temp_weeks (week_num) VALUES (@day_4)
    INSERT INTO #temp_weeks (week_num) VALUES (@day_5)
    INSERT INTO #temp_weeks (week_num) VALUES (@day_6)
    INSERT INTO #temp_weeks (week_num) VALUES (@day_7)
    -- Highest ISO week seen in the year's final days = number of weeks in the year.
    DECLARE @max_week TINYINT
    SET @max_week = (SELECT MAX(week_num)
                     FROM #temp_weeks)
    WHILE (@week_counter <= @max_week)
    BEGIN
        INSERT INTO #temp_dates
            (year,
             week)
        VALUES (@year_counter,
                @week_counter)
        SET @week_counter = @week_counter + 1
    END
    SET @year_counter = @year_counter + 1
END
DROP TABLE #temp_weeks;
-- Every model paired with every (year, week) row of the calendar.
SELECT i.model,
       d.year,
       d.week
FROM dbo.iteminformation i
CROSS JOIN #temp_dates d
ORDER BY model,
         year,
         week
DROP TABLE #temp_dates
I ran this query :
select * From dbo.fn_split('Age,15,14,193,188 ',',')
It's returning the values but cutting off one character from the front of each value.
I tried adding space after every comma like
select * From dbo.fn_split('Age, 15, 14, 193, 188 ',',')
And it worked. But I want to know why is not working with commas
select * From dbo.fn_split ('Age,15,14,193,188 ',',')
You're not posting the code of fn_split, which is where the problem resides. But if your strings are less than 8000 characters long, this function will help you split them in an optimal way. This function is a modified version of Jeff Moden's splitter, made by Eirikur Eiriksson.
CREATE FUNCTION [dbo].[DelimitedSplit8K_LEAD]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE driven "Tally Table" produces values from 0 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "zero base" and limits the number of rows right up front
                -- for both a performance gain and prevention of accidental "overruns"
    SELECT 0 UNION ALL
    SELECT TOP (DATALENGTH(ISNULL(@pString,1))) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
    SELECT t.N+1
    FROM cteTally t
    WHERE (SUBSTRING(@pString,t.N,1) = @pDelimiter OR t.N = 0)
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY s.N1),
       Item = SUBSTRING(@pString,s.N1,ISNULL(NULLIF((LEAD(s.N1,1,1) OVER (ORDER BY s.N1) - 1),0)-s.N1,8000))
FROM cteStart s
;
GO
ALTER FUNCTION [dbo].[fn_Split](@text varchar(8000), @delimiter varchar(20) = ',')
RETURNS @Strings TABLE
(
    position int IDENTITY PRIMARY KEY,
    value varchar(8000)
)
AS
BEGIN
    -- Splits @text on @delimiter into ordered rows.
    -- NOTE(review): LEN() ignores trailing spaces, so when @text ends in a
    -- space, LEN(@text) - @index undercounts the remainder by one and
    -- RIGHT() drops the first character of each subsequent element — this is
    -- the behavior reported in the question above.
    DECLARE @index int
    SET @index = -1
    WHILE (LEN(@text) > 0)
    BEGIN
        -- Position of the next delimiter (0 when none remains).
        SET @index = CHARINDEX(@delimiter, @text)
        IF (@index = 0) AND (LEN(@text) > 0)
        BEGIN
            -- No delimiter left: the remainder is the final element.
            INSERT INTO @Strings VALUES (@text)
            BREAK
        END
        IF (@index > 1)
        BEGIN
            INSERT INTO @Strings VALUES (LEFT(@text, @index - 1))
            SET @text = RIGHT(@text, (LEN(@text) - @index))
        END
        ELSE
            -- Delimiter at position 1 (empty element): skip past it.
            SET @text = RIGHT(@text, (LEN(@text) - @index))
    END
    RETURN
END
I figured out what I was doing wrong. Basically, I was adding an extra space at the end of the string that I wanted to split. If you change
Select * From dbo.fn_split('Age,15,14,193,188 ',',') to Select * From dbo.fn_split('Age,15,14,193,188',','). In other words, get rid of the extra space after the number 188 and you will get the desired result.
Solution
I have a table like this:
id: PK bigint
RatePercent: decimal(4, 4)
DateRange: date
I am trying to populate the table as follows:
RatePercentage with all of them 0.12
Date starting from '01-01-2015' to '12-31-2099'
Unfortunately with my query it won't do that and it keeps saying that
Operand type clash: date is incompatible with int
I haven't assigned an int datatype asides from the id bigint. I'm a bit confused.
Here is my query so far:
DECLARE @Date Date
SET @Date = '01-01-2015'
WHILE @Date <= '12-31-2099'
BEGIN
    INSERT INTO [dbo].[IMF_Main_VATHistory] (VATRate, VATDate)
    -- BUG (the subject of this question): "@Date + 1" asks SQL Server for
    -- integer addition on a DATE, which raises
    -- "Operand type clash: date is incompatible with int" — use DATEADD.
    -- Also note @Date is never incremented, so the loop would never terminate.
    VALUES (0.12, @Date + 1);
END
Try this:
DECLARE @Date Date
SET @Date = '01-01-2015'
WHILE @Date <= '12-31-2099'
BEGIN
    INSERT INTO [dbo].[IMF_Main_VATHistory] (VATRate, VATDate)
    -- Insert the current date, then advance. (Inserting DATEADD(DAY, 1, @Date)
    -- here would skip 2015-01-01 and overshoot to 2100-01-01.)
    VALUES (0.12, @Date);
    SET @Date = DATEADD(DAY, 1, @Date);
END
You can't issue a direct addition to a DATE datatype, in SQL Server (for reference, I think you can in Oracle). You have to use functions in order to modify a DATE/DATETIME variable (or column).
Here is an example SQLFiddle.
The problem is in your "@Date + 1", I think — SQL Server tries to convert the operands for integer addition, which the DATE type doesn't support :)
Use DATEADD that should work
DECLARE @Date Date
SET @Date = '01-01-2015'
WHILE @Date <= '12-31-2099'
BEGIN
    INSERT INTO [dbo].[IMF_Main_VATHistory] (VATRate, VATDate)
    VALUES (0.12, @Date);
    -- DATEADD is the supported way to advance a DATE; direct "+ 1" is not.
    SET @Date = DATEADD(DAY, 1, @Date);
END
I'll advise against using any loop-based or RBAR solution. You can do this using a set-based approach with the help of a Tally Table.
DECLARE @startDate DATE
DECLARE @endDate DATE
SET @startDate = '20150101'
SET @endDate = '20991231';
-- Stacked cross joins build a tally (numbers) table large enough to cover
-- every day in the range; no loop required.
WITH E1(N) AS(
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
),
E2(N) AS(SELECT 1 FROM E1 a, E1 b),
E4(N) AS(SELECT 1 FROM E2 a, E2 b),
E8(N) AS(SELECT 1 FROM E4 a, E4 b),
Tally(n) AS(
    SELECT TOP(DATEDIFF(DAY, @startDate, @endDate) + 1)
        ROW_NUMBER() OVER(ORDER BY (SELECT NULL))
    FROM E8
)
INSERT INTO IMF_Main_VATHistory(RatePercent, DateRange)
SELECT
    -- 0.12 per the requirement ("RatePercentage with all of them 0.12").
    0.12, DATEADD(DAY, N - 1, @startDate)
FROM Tally
It's faster compared to using CURSORs or WHILE loops.
I have written the following function that takes in two strings (comma-separated), splits them into two different temp tables and then uses those temp tables to find what percentage of words match in those two temp tables. The problem is that when I am using it per row basis on a data set of about 200k rows, the query times out!
Are there any optimizations that you can see that can be done?
ALTER FUNCTION [GetWordSimilarity](@String varchar(8000),
    @String2 varchar(8000), @Delimiter char(1))
returns decimal(16,2)
as
begin
    -- Returns the count of distinct words the two delimited strings share,
    -- divided by the larger of the two word counts (0.00 .. 1.00).
    declare @result as decimal (16,2)
    declare @temptable table (items varchar(8000))
    declare @temptable2 table (items varchar(8000))
    declare @numberOfCommonWords decimal(16,2)
    declare @countTable1 decimal(16,2)
    declare @countTable2 decimal(16,2)
    declare @denominator decimal(16,2)
    set @result = 0.0 --dummy value
    declare @idx int
    declare @slice varchar(8000)
    select @idx = 1
    -- Guard: empty or NULL input on either side means no similarity.
    if len(@String) < 1 or @String is null or len(@String2) = 0 or @String2 is null return 0.0
    --populating @temptable
    while @idx != 0
    begin
        set @idx = charindex(@Delimiter, @String)
        if @idx != 0
            set @slice = left(@String, @idx - 1)
        else
            set @slice = @String
        if (len(@slice) > 0)
            insert into @temptable(Items) values (ltrim(rtrim(@slice)))
        set @String = right(@String, len(@String) - @idx)
        if len(@String) = 0 break
    end
    select @idx = 1
    ----populating @temptable2
    while @idx != 0
    begin
        set @idx = charindex(@Delimiter, @String2)
        if @idx != 0
            set @slice = left(@String2, @idx - 1)
        else
            set @slice = @String2
        if (len(@slice) > 0)
            insert into @temptable2(Items) values (ltrim(rtrim(@slice)))
        set @String2 = right(@String2, len(@String2) - @idx)
        if len(@String2) = 0 break
    end
    --calculating percentage of words that match
    if (((select COUNT(*) from @temptable) = 0) or ((select COUNT(*) from @temptable2) = 0))
        return 0.0
    select @numberOfCommonWords = COUNT(*) from
    (
        select distinct items from @temptable
        intersect
        select distinct items from @temptable2
    ) a
    select @countTable1 = COUNT(*) from @temptable
    select @countTable2 = COUNT(*) from @temptable2
    if (@countTable1 > @countTable2) set @denominator = @countTable1
    else set @denominator = @countTable2
    set @result = @numberOfCommonWords / @denominator
    return @result
end
Thanks a bunch !
There is no way to write a T-SQL UDF with heavy string manipulation inside that will behave well on a large number of rows. You will get some gain if you use a Numbers table, though:
declare
    @col_list varchar(1000),
    @sep char(1)
set @col_list = 'TransactionID, ProductID, ReferenceOrderID, ReferenceOrderLineID, TransactionDate, TransactionType, Quantity, ActualCost, ModifiedDate'
set @sep = ','
-- Numbers-table split: each n where the previous character is the separator
-- marks the start of an element; SUBSTRING extracts up to the next separator.
select substring(@col_list, n, charindex(@sep, @col_list + @sep, n) - n)
from numbers where substring(@sep + @col_list, n, 1) = @sep
and n < len(@col_list) + 1
Your best course of action would be to write the whole thing in SQLCLR.
The problem of course is with the design. You shouldn't be storing comma-separated data in a SQL database to start with.
But, I guess we're stuck with it for now.
One thing to consider is converting the function to SQLCLR; SQL by itself is not very good with string operations. (Well, in fact, no language is good with string operations IMHO but SQL really is bad at it =)
The splitter you use to fill #Temptables 1 & 2 can be optimized by using the code from Jeff Moden who wrote several fantastic articles of which the last one can be found here : http://www.sqlservercentral.com/articles/Tally+Table/72993/
Taking his splitter + optimizing the rest of the code a bit I managed to get from 771 seconds to 305 seconds on a 200K random data sample.
Some things to note: the results aren't quite the same. I checked some manually and I actually think the new results are more accurate but don't really have time to go bughunting on both versions.
I tried to convert this to a more set-based approach where I first load all the words in a table that has all words for all row_id's and then join them back together. Although the joining is quite fast, it simply takes too long to create the initial tables so it even loses out on the original function.
Maybe I'll try to figure out another way to make it faster but for now I hope this will help you out a bit.
ALTER FUNCTION [GetWordSimilarity2](@String1 varchar(8000),
    @String2 varchar(8000), @Delimiter char(1))
returns decimal(16,2)
as
begin
    -- Same contract as GetWordSimilarity, but splitting is done with an
    -- inline tally table instead of a WHILE loop.
    declare @temptable1 table (items varchar(8000), row_id int IDENTITY(1, 1), PRIMARY KEY (items, row_id))
    declare @temptable2 table (items varchar(8000), row_id int IDENTITY(1, 1), PRIMARY KEY (items, row_id))
    declare @numberOfCommonWords decimal(16,2)
    declare @countTable1 decimal(16,2)
    declare @countTable2 decimal(16,2)
    -- based on code from Jeff Moden (http://www.sqlservercentral.com/articles/Tally+Table/72993/)
    --populating @temptable1
    ;WITH E1(N) AS (
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
    ), --10E+1 or 10 rows
    E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
    E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
    cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
                    -- for both a performance gain and prevention of accidental "overruns"
        SELECT TOP (ISNULL(DATALENGTH(@String1),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
    ),
    cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
        SELECT 1 UNION ALL
        SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@String1,t.N,1) = @Delimiter
    ),
    cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
        SELECT s.N1,
               ISNULL(NULLIF(CHARINDEX(@Delimiter,@String1,s.N1),0)-s.N1,8000)
        FROM cteStart s
    )
    --===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
    INSERT @temptable1 (items)
    SELECT Item = SUBSTRING(@String1, l.N1, l.L1)
    FROM cteLen l
    SELECT @countTable1 = @@ROWCOUNT
    ----populating @temptable2
    ;WITH E1(N) AS (
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
    ), --10E+1 or 10 rows
    E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
    E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
    cteTally(N) AS (
        SELECT TOP (ISNULL(DATALENGTH(@String2),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
    ),
    cteStart(N1) AS (
        SELECT 1 UNION ALL
        SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@String2,t.N,1) = @Delimiter
    ),
    cteLen(N1,L1) AS(
        SELECT s.N1,
               ISNULL(NULLIF(CHARINDEX(@Delimiter,@String2,s.N1),0)-s.N1,8000)
        FROM cteStart s
    )
    INSERT @temptable2 (items)
    SELECT Item = SUBSTRING(@String2, l.N1, l.L1)
    FROM cteLen l
    SELECT @countTable2 = @@ROWCOUNT
    --calculating percentage of words that match
    if @countTable1 = 0 OR @countTable2 = 0
        return 0.0
    select @numberOfCommonWords = COUNT(DISTINCT t1.items)
    from @temptable1 t1
    JOIN @temptable2 t2
        ON t1.items = t2.items
    RETURN @numberOfCommonWords / (CASE WHEN (@countTable1 > @countTable2) THEN @countTable1 ELSE @countTable2 END)
end
I need to fill a time table to use it for joining the data in reporting services.
Generally I do this with this code:
TRUNCATE TABLE tqTimeTable
DECLARE @CNT int
DECLARE @DATE datetime
DECLARE @END int
SET @CNT = 1
SET @DATE = 25567 -- 01.01.1970 (int implicitly converted: days since 1900-01-01)
SET @END = 20000 -- + 20k days => year 2024
WHILE(@CNT < @END)
BEGIN
    INSERT INTO tqTimeTable (Tag, Monat, Jahr)
    VALUES (DATEADD(day, @CNT, @DATE), MONTH(DATEADD(day, @CNT, @DATE)), YEAR(DATEADD(day, @CNT, @DATE)))
    SET @CNT = @CNT + 1
END;
But this takes a while (on my test system around 2 minutes) so I hope someone had the same issue and solved it better then me.
As I fire this statement from a .NET connection I need a faster solution or if there isn't one to raise the timeout of my connection.
Simply adding
-- Single transaction: one log flush instead of 20k per-row commits.
BEGIN TRAN
WHILE(@CNT < @END)
BEGIN
    INSERT INTO tqTimeTable (Tag, Monat, Jahr)
    VALUES (DATEADD(day, @CNT, @DATE), MONTH(DATEADD(day, @CNT, @DATE)), YEAR(DATEADD(day, @CNT, @DATE)))
    SET @CNT = @CNT + 1
END;
COMMIT
will speed it up as you are doing many individual commits (that all require the log to be written to disc). I would do a set based insert in a single statement though.
-- Set-based insert: a tally CTE replaces the loop entirely.
-- NOTE(review): assumes @DATE is declared and set as in the loop version above.
WITH E1(N) AS
(
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
    SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
) -- 1*10^1 or 10 rows
, E2(N) AS (SELECT 1 FROM E1 a, E1 b) -- 1*10^2 or 100 rows
, E4(N) AS (SELECT 1 FROM E2 a, E2 b) -- 1*10^4 or 10,000 rows
, E8(N) AS (SELECT 1 FROM E4 a, E4 b) -- 1*10^8 or 100,000,000 rows
-- TOP caps row generation up front so E8 is never fully enumerated.
, NUMS(N) AS (SELECT TOP (20000) ROW_NUMBER() OVER (ORDER BY (SELECT 0)) FROM E8)
INSERT INTO tqTimeTable
    (Tag,
     Monat,
     Jahr)
SELECT DATEADD(day, N, @DATE),
       MONTH(DATEADD(day, N, @DATE)),
       YEAR(DATEADD(day, N, @DATE))
FROM NUMS
WHERE N <= 20000