I have the following table:
State LAB GROUP DATE CODE ID
UP A I 1-Jan 1 345
UP R S 1-Feb 1 456
UP A S 1-Jan 2 567
DL R S 1-Feb 3 678
DL T S 1-Jan 1 789
DL A S 1-Jan 2 900
MN T S 1-Jan 3 1011
MN R I 1-Feb 1 1122
MN S I 1-Feb 2 1233
I need a pivot table of the following type:
STATE A R T TOTAL
UP 2 1 0 3
DL 1 1 1 3
MN 0 1 1 2
DISTINCT COUNT OF ID FOR EACH LAB FOR EACH STATE.
I then need the pivot tables filtered for following columns:
GROUP
DATE
CODE
So the 1st table will be the pivot table above counting only those records which have GROUP=S,
the 2nd table will be the pivot table above counting only those records which have CODE=1,
and so on; I wish to apply multiple conditions, generate several tables one by one, and export them.
If this is possible in SQL, please let me know! I ruled out Excel VBA due to the size of the table (the source table will have approximately 800,000 records).
Try this:
Select [State],[A],[R],[T],Total = [A] + [R]+ [T]
from
(
Select [State],
[A] = Sum(Case when LAB='A' then 1 else 0 END) ,
[R] = Sum(Case when LAB='R' then 1 else 0 END) ,
[T] = Sum(Case when LAB='T' then 1 else 0 END)
from YourTable
group by [State]
)a
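The question also asks for a distinct count of ID and for filtered versions of the table. A sketch of that variation on the query above (keeping the placeholder name YourTable; the WHERE clause shown is only the GROUP=S example and can be swapped for the DATE or CODE conditions):
Select [State],[A],[R],[T],Total = [A] + [R] + [T]
from
(
Select [State],
-- COUNT(DISTINCT ...) ignores the NULLs produced when the CASE does not match
[A] = Count(Distinct Case when LAB = 'A' then ID end),
[R] = Count(Distinct Case when LAB = 'R' then ID end),
[T] = Count(Distinct Case when LAB = 'T' then ID end)
from YourTable
where [GROUP] = 'S'   -- or DATE = '1-Jan', CODE = 1, etc.
group by [State]
)a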
SQL FIDDLE
CREATE TABLE #t(States VARCHAR(10),LAB VARCHAR(5),GROUPs VARCHAR(5),DATEs VARCHAR(10),CODE INT,ID INT)
INSERT INTO #t values('UP','A','I','1-Jan',1,345)
INSERT INTO #t values('UP','R','S','1-Feb',1,456)
INSERT INTO #t values('UP','A','S','1-Jan',2,567)
INSERT INTO #t values('DL','R','S','1-Feb',3,678)
INSERT INTO #t values('DL','T','S','1-Jan',1,789)
INSERT INTO #t values('DL','A','S','1-Jan',2,900)
INSERT INTO #t values('MN','T','S','1-Jan',3,1011)
INSERT INTO #t values('MN','R','I','1-Feb',1,1122)
INSERT INTO #t values('MN','S','I','1-Feb',2,1233)
SELECT States,ISNULL(A,0) A,ISNULL(R,0) R,ISNULL(T,0) T,ISNULL(A,0)+ISNULL(R,0)+ISNULL(T,0) total
FROM
(
SELECT States,LAB,Count(ID) AS cnt FROM #t GROUP BY States,LAB /*apply GROUP DATE CODE condition here*/
) AS PVT
PIVOT(MAX(cnt) FOR LAB IN (A,R,T)) p
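The inline comment above marks where the question's filters go; to also get the distinct count of ID, the inner query could look like this (a sketch for the GROUP=S table, using this script's column names):
SELECT States,LAB,COUNT(DISTINCT ID) AS cnt
FROM #t
WHERE GROUPs='S'   -- or DATEs='1-Jan', CODE=1, and so on
GROUP BY States,LAB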
Another solution using PIVOT:
WITH PivotInUse AS (
SELECT state,lab,COUNT(*) AS cnt
FROM YourTable
GROUP BY state,lab
)
SELECT STATE
,COALESCE([A], 0) AS A
,COALESCE([R], 0) AS R
,COALESCE([T], 0) AS T
,COALESCE([A], 0) + COALESCE([R], 0) + COALESCE([T], 0) AS TOTAL
FROM PivotInUse
PIVOT(SUM(cnt) FOR lab IN ([A],[R],[T])) AS p;
Your sample table
SELECT * INTO #TEMP FROM
(
SELECT 'UP' [State],'A' LAB,'I' [GROUP],'1-Jan' [DATE],1 CODE,345 ID
UNION ALL
SELECT 'UP','R','S','1-Feb',1,456
UNION ALL
SELECT 'UP','A','S','1-Jan',2,567
UNION ALL
SELECT 'DL','R','S','1-Feb',3,678
UNION ALL
SELECT 'DL','T','S','1-Jan',1,789
UNION ALL
SELECT 'DL','A','S','1-Jan',2,900
UNION ALL
SELECT 'MN','T','S','1-Jan',3,1011
UNION ALL
SELECT 'MN','R','I','1-Feb',1,1122
UNION ALL
SELECT 'MN','S','I','1-Feb',2,1233
)TAB
Now get the count for each State and LAB combination, and use ROLLUP so that the per-State total is also available to show as Total
in the pivoted result.
SELECT DISTINCT [State],LAB,SUM(CNT) CNT
INTO #NEWTABLE
FROM
(
SELECT DISTINCT
[State],LAB,
CASE WHEN [State] IS NULL THEN NULL ELSE COUNT([State]) OVER(PARTITION BY [State],LAB) END CNT
FROM #TEMP
)TAB
GROUP BY [State],LAB
WITH ROLLUP
Now we need to get the distinct columns for the pivot (@cols) and the columns used to replace NULL with zero in the pivot (@NullToZeroCols):
DECLARE @cols NVARCHAR (MAX)
DECLARE @NullToZeroCols NVARCHAR (MAX)
SET @cols = SUBSTRING((SELECT DISTINCT ',['+LAB+']' FROM #NEWTABLE GROUP BY LAB FOR XML PATH('')),2,8000)
SET @NullToZeroCols = SUBSTRING((SELECT DISTINCT ',ISNULL(['+LAB+'],0) AS ['+LAB+']'
FROM #NEWTABLE GROUP BY LAB FOR XML PATH('')),2,8000)
Join the pivoted query with #NEWTABLE to get the Total for each State:
DECLARE @query NVARCHAR(MAX)
SET @query = 'SELECT P.State,' + @NullToZeroCols + ',T2.CNT TOTAL FROM
(
SELECT DISTINCT [State],LAB,CNT FROM #NEWTABLE
) x
PIVOT
(
SUM(CNT)
FOR [LAB] IN (' + @cols + ')
) p
JOIN #NEWTABLE T2 ON P.[STATE]=T2.[STATE]
WHERE P.State IS NOT NULL AND T2.LAB IS NULL AND T2.[STATE] IS NOT NULL;'
EXEC SP_EXECUTESQL @query
Here is the SQLFiddle with the result: http://sqlfiddle.com/#!3/c2588/1 (if it shows an error while loading the page, just click Run SQL and it will work).
Now if you want to get the result as you said DISTINCT COUNT OF ID FOR EACH LAB FOR EACH STATE, just change
OVER(PARTITION BY [State],LAB)
to
OVER(PARTITION BY [State],LAB,Id)
and then run the pivot query again to get the distinct counts.
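The question also mentions generating the filtered tables one by one and exporting them. A minimal sketch of that idea using the conditional-aggregation form (YourTable is the placeholder name from the question; Pivot_GroupS and Pivot_Code1 are just example output table names):
SELECT [State], [A], [R], [T], Total = [A] + [R] + [T]
INTO Pivot_GroupS   -- one output table per filter
FROM
(
SELECT [State],
[A] = COUNT(DISTINCT CASE WHEN LAB = 'A' THEN ID END),
[R] = COUNT(DISTINCT CASE WHEN LAB = 'R' THEN ID END),
[T] = COUNT(DISTINCT CASE WHEN LAB = 'T' THEN ID END)
FROM YourTable
WHERE [GROUP] = 'S'
GROUP BY [State]
) a;
-- repeat with WHERE CODE = 1 INTO Pivot_Code1, and so on;
-- each saved table can then be exported with bcp, SSIS or the Import/Export wizard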
I have a requirement where my client wants me to retrieve specific information from a text column.
Following is a sample:
the student scored following result: class: 6 subject: result: english 80 math 23
science 45
The expected outcome needs to look like this:
English Maths Science
80 23 45
I tried using string_split
select value from STRING_SPLIT( (select value from mytable where [student roll number] = 'SCH-01097') , ' ' )
but that only splits the value into multiple rows that can't be queried further.
I also tried an LTRIM with CHARINDEX approach, but the column has varying text and is not always organized; the initial text is different most of the time.
Can this be done?
Edit: I am close but just not there yet. So far I have reached here:
SELECT VALUE FROM STRING_SPLIT ((select
substring(value, charindex('Block',value),1000)
from mytable where [rollnumber ] = 'SCH-01097'),' ') WHERE VALUE <> ' '
This gives me everything I need, but in a single column:
class6:
Subject
result
english
80
math
23
science
45
Now, how do I get it into the desired table form?
To maintain the order of the split values, this answer uses DelimitedSplit8K. Something like this works.
[Edit] Instead of having specific strings in a CTE, the query now uses 'stems' to map multiple strings to the same class. For example, if English is entered as En it will still be mapped to English.
Table and data
drop table if exists #tTest;
go
create table #tTest(
string Varchar(256));
insert #tTest(string) values
('the student scored following result: class: 6 subject: result: english 80 math 23');
DelimitedSplit8k
CREATE FUNCTION dbo.DelimitedSplit8K
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
--WARNING!!! DO NOT USE MAX DATA-TYPES HERE! IT WILL KILL PERFORMANCE!
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE Driven "Tally Table" produces values from 1 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT TOP (ISNULL(DATALENGTH(@pString),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@pString,t.N,1) = @pDelimiter
),
cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,8000)
FROM cteStart s
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = SUBSTRING(@pString, l.N1, l.L1)
FROM cteLen l
;
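A quick sanity check of the splitter (the sample string here is just an illustration):
SELECT ItemNumber, Item
FROM dbo.DelimitedSplit8K('english 80 math 23 science 45', ' ');
-- returns one row per word, numbered in the order the words appear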
Query
;with
stems_cte(stem, word) as (
select 'English', 'English' union all
select 'En', 'English' union all
select 'Math', 'Math' union all
select 'Maths', 'Math' union all
select 'Science', 'Science'),
splt_cte(string, str_val, ndx, lead_ndx, lead_len, rn) as (
select t.string, ds.Item, charndx.ndx,
lead(charndx.ndx) over (order by ds.ItemNumber),
lead(len(ds.[Item])) over (order by ds.ItemNumber),
ItemNumber
from #tTest t
cross apply dbo.DelimitedSplit8K(t.string, ' ') ds
cross apply (select charindex(ds.Item, t.string, 1) ndx) charndx
where Item <> ' '),
spec_rows_cte(word, ndx, lead_ndx, lead_len, rn) as (
select sp.word, sc.ndx, sc.lead_ndx, sc.lead_len, sc.rn
from splt_cte sc
join stems_cte sp on sc.str_val=sp.stem)
select max(case when src.word='English' then substring(sc.string, src.lead_ndx, src.lead_len) else null end) English,
max(case when src.word='Math' then substring(sc.string, src.lead_ndx, src.lead_len) else null end) Math,
max(case when src.word='Science' then substring(sc.string, src.lead_ndx, src.lead_len) else null end) Science
from splt_cte sc
join spec_rows_cte src on sc.rn=src.rn;
Output
English Math Science
80 23 NULL
You can insert the results from your query into a table variable with an Identity column, then get the next row for each required subset:
declare @tmp table (Id int identity, Value varchar(20))
Insert into @tmp (VALUE)
SELECT VALUE FROM STRING_SPLIT ((select
substring(value, charindex('Block',value),1000)
from mytable where [rollnumber ] = 'SCH-01097'),' ') WHERE VALUE <> ' '
select
English = (Select top 1 Value From @tmp where Id = (Select Id + 1 From @tmp where Value = 'english')),
Math = (Select top 1 Value From @tmp where Id = (Select Id + 1 From @tmp where Value = 'math')),
Science = (Select top 1 Value From @tmp where Id = (Select Id + 1 From @tmp where Value = 'science'))
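A variation on the same idea (just a sketch) that avoids the three scalar subqueries: pair each label with the value that follows it using LEAD over the identity order, then pivot with MAX(CASE):
select
English = max(case when Value = 'english' then NextValue end),
Math    = max(case when Value = 'math'    then NextValue end),
Science = max(case when Value = 'science' then NextValue end)
from
(
select Value, lead(Value) over (order by Id) as NextValue
from @tmp
) t;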
I have this:
SELECT NEWID() as id,
'OwnerReassign' as name,
1 as TypeId,
'MyOrganisation' as OrgName,
'07DA8E53-74BD-459C-AF94-A037897A51E3' as SystemUserId,
0 as StatusId,
GETDATE() as CreatedAt,
'{"EntityName":"account","Ids":["'+CAST(AccountId as varchar(50))+'"],"OwnerId":"0C01C994-1205-E511-988E-26EE4189191B"}' as [Parameters]
FROM Account
WHERE OwnerIdName IN ('John Smith') AND New_AccountType = 1
Within the Parameters field is an id (0C01C994-1205-E511-988E-26EE4189191B). Is it possible to sequentially assign a different id from a list for each row? There are 5 ids in total.
What I'm trying to get to is this result set split equally between the 5 different ids.
Thanks
You can add one more NEWID() in the subquery and handle it in the SELECT as below:
SELECT id, [name], TypeId, OrgName, SystemUserId, StatusId, CreatedAt,
'{"EntityName":"account","Ids":["' + AccountId +'"],"OwnerId":"' + ParamId + '"}' as [Parameters]
FROM (
SELECT NEWID() as id,
'OwnerReassign' as name,
1 as TypeId,
'MyOrganisation' as OrgName,
'07DA8E53-74BD-459C-AF94-A037897A51E3' as SystemUserId,
0 as StatusId,
GETDATE() as CreatedAt,
CAST(NEWID() AS VARCHAR (36)) as ParamId,
CAST(AccountId as varchar(50)) as AccountId
FROM Account
WHERE OwnerIdName IN ('John Smith') AND New_AccountType = 1
) A
You can use something like the following. Basically, generate a row number for both your list of IDs and the data table to update, then do a MOD (%) operation with the number of IDs you want to assign, so the data table is split into N groups. Then use that group number to assign each ID.
IF OBJECT_ID('tempdb..#IDsToAssign') IS NOT NULL
DROP TABLE #IDsToAssign
CREATE TABLE #IDsToAssign (
IDToAssign VARCHAR(100))
-- 3 IDs example
INSERT INTO #IDsToAssign (
IDToAssign)
SELECT IDToAssign = NEWID()
UNION ALL
SELECT IDToAssign = NEWID()
UNION ALL
SELECT IDToAssign = NEWID()
DECLARE @AmountIDsToAssign INT = (SELECT COUNT(1) FROM #IDsToAssign)
IF OBJECT_ID('tempdb..#Account') IS NOT NULL
DROP TABLE #Account
CREATE TABLE #Account (
PrimaryKey INT PRIMARY KEY,
AssignedID VARCHAR(100))
-- 10 Rows example
INSERT INTO #Account (
PrimaryKey)
VALUES
(100),
(200),
(351),
(154),
(194),
(345),
(788),
(127),
(124),
(14)
;WITH DataRowNumber AS
(
SELECT
A.*,
RowNumber = ROW_NUMBER() OVER (ORDER BY (SELECT NULL))
FROM
#Account AS A
),
IDsRowNumbers AS
(
SELECT
D.IDToAssign,
RowNumber = ROW_NUMBER() OVER (ORDER BY D.IDToAssign)
FROM
#IDsToAssign AS D
),
NewIDAssignation AS
(
SELECT
R.*,
IDRowNumberAssignation = (R.RowNumber % @AmountIDsToAssign) + 1
FROM
DataRowNumber AS R
)
UPDATE A SET
AssignedID = R.IDToAssign
FROM
NewIDAssignation AS N
INNER JOIN IDsRowNumbers AS R ON N.IDRowNumberAssignation = R.RowNumber
INNER JOIN #Account AS A ON N.PrimaryKey = A.PrimaryKey
SELECT
*
FROM
#Account AS A
ORDER BY
A.AssignedID
/* Results:
PrimaryKey AssignedID
----------- ------------------------------------
124 1CC7F0F1-7EDE-4F7F-B0A3-739D74A62390
194 1CC7F0F1-7EDE-4F7F-B0A3-739D74A62390
351 1CC7F0F1-7EDE-4F7F-B0A3-739D74A62390
788 2A58A573-EDCB-428E-A87A-6BFCED265A9C
200 2A58A573-EDCB-428E-A87A-6BFCED265A9C
127 2A58A573-EDCB-428E-A87A-6BFCED265A9C
14 2A58A573-EDCB-428E-A87A-6BFCED265A9C
100 FD8036DA-0E15-453E-8A59-FA3C2BDB8FB1
154 FD8036DA-0E15-453E-8A59-FA3C2BDB8FB1
345 FD8036DA-0E15-453E-8A59-FA3C2BDB8FB1
*/
The ordering of the ROW_NUMBER() function will determine how the IDs are assigned.
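An alternative grouping (a sketch reusing this answer's #Account, #IDsToAssign and @AmountIDsToAssign) is NTILE, which deals the rows into contiguous, roughly equal buckets instead of round-robin groups:
UPDATE A SET
    AssignedID = R.IDToAssign
FROM #Account AS A
INNER JOIN
(
    -- NTILE(n) numbers the rows 1..n in roughly equal-sized, contiguous buckets
    SELECT PrimaryKey,
           Bucket = NTILE(@AmountIDsToAssign) OVER (ORDER BY PrimaryKey)
    FROM #Account
) AS B ON B.PrimaryKey = A.PrimaryKey
INNER JOIN
(
    SELECT IDToAssign,
           RowNumber = ROW_NUMBER() OVER (ORDER BY IDToAssign)
    FROM #IDsToAssign
) AS R ON B.Bucket = R.RowNumber;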
You could potentially do this by using the ROW_NUMBER() field in a subquery; for example:
SELECT NEWID() as id, 'OwnerReassign' as name, 1 as TypeId,
'MyOrganisation' as OrgName,
'07DA8E53-74BD-459C-AF94-A037897A51E3' as SystemUserId,
0 as StatusId, GETDATE() as CreatedAt,
case (B.RowNum - 1) * 5 / B.TotalRows   -- 0 to 4: splits the rows into five equal blocks
when 0 then '0C01C994-1205-E511-988E-26EE4189191B'
when 1 then '12345677-1205-E511-988E-26EE4189191B'
when 2 then '66666666-1205-E511-988E-26EE4189191B'
etc...
end
FROM
(
SELECT ROW_NUMBER() OVER (ORDER BY A.Id) AS RowNum, COUNT(*) OVER () AS TotalRows
FROM Account A
WHERE OwnerIdName IN ('John Smith') AND New_AccountType = 1
) AS B
If you want the system to pick those values, then you could put them in their own temporary table, too.
I have a column in a table which has incremented values like:
AAA0000001
AAA0000002
... and so on
I want to find if the values stored in this column are in proper sequential order or if any value is missing in between or is deleted.
How can I achieve this?
Assuming the pattern is always: AAA[0-9][0-9][0-9][0-9][0-9][0-9][0-9], you can do this with a Tally Table.
Sample Data:
CREATE TABLE Tbl(val VARCHAR(10))
INSERT INTO Tbl VALUES
('AAA0000001'), ('AAA0000002'), ('AAA0000004'), ('AAA0000011');
val
----------
AAA0000001
AAA0000002
AAA0000004
AAA0000011
SQL Fiddle
;WITH Cte AS(
SELECT *,
num = CAST(SUBSTRING(val, 4, LEN(val) - 3) AS INT)
FROM Tbl
),
E1(N) AS(
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
),
E2(N) AS(SELECT 1 FROM E1 a CROSS JOIN E1 b),
E4(N) AS(SELECT 1 FROM E2 a CROSS JOIN E2 b),
Tally(N) AS(
SELECT TOP(SELECT MAX(num) FROM Cte)
ROW_NUMBER() OVER(ORDER BY (SELECT NULL))
FROM E4
)
SELECT
N,
val = 'AAA' + RIGHT('0000000' + CAST(N AS VARCHAR(7)), 7)
FROM Tally
WHERE NOT EXISTS(
SELECT 1 FROM Cte WHERE num = N
)
RESULT
N val
-------------------- ----------
3 AAA0000003
5 AAA0000005
6 AAA0000006
7 AAA0000007
8 AAA0000008
9 AAA0000009
10 AAA0000010
Explanation:
The first CTE, named Cte, extracts the numeric part of the strings and CASTs it to INT.
The succeeding CTEs, from E1 to Tally(N), generate a table with sequential values from 1 up to MAX(num), the INT returned by the first CTE.
The final SELECT just checks for the non-existing num from the first CTE.
'AAA' + RIGHT('0000000' + CAST(N AS VARCHAR(7)), 7) transforms N so that it follows the pattern.
This is a Gaps problem. You can look into this article by Dwain Camps for more solutions on Gaps and Islands.
You can use ROW_NUMBER like this.
Sample Data
DECLARE @tab1 TABLE(id VARCHAR(20));
insert into @tab1 VALUES('AAA0000001'),('AAA0000002'),('AAA0000003'),('AAA0000004'),('AAA0000006'),('AAA0000007'),('AAA0000010');
Query
;WITH CTE as
(
SELECT convert(int,STUFF(id,1,3,'')) id,convert(int,STUFF(id,1,3,'')) - ROW_NUMBER()OVER(ORDER BY convert(int,STUFF(id,1,3,''))) rn
FROM @tab1
),CTE2 as
(
SELECT ROW_NUMBER()OVER(ORDER BY rn) as rn, MIN(id) series_start,MAX(id) series_end
FROM CTE
GROUP BY rn
)
SELECT C2.series_end,C1.series_start
FROM CTE2 C1
INNER JOIN CTE2 C2 ON C1.rn = C2.rn + 1;
SQL Fiddle
Explanation
The output of CTE is each id along with the difference between the numeric id and its row number; this difference is constant within a continuous run of values.
The output of CTE2 is the start and end of each continuous series of numbers.
The final output pairs the end of one series with the start of the next, i.e. the values that bound each gap.
Output
series_end series_start
4 6
7 10
If the schema is fixed then no need for complex queries. This works:
DECLARE @t TABLE ( v VARCHAR(100) );
INSERT INTO @t
VALUES ( 'AAA0000001' ),
( 'AAA0000002' ),
( 'AAA0000007' ),
( 'AAA0000008' ),
( 'AAA0000010' ),
( 'AAA0000011' ),
( 'AAA0000012' );
SELECT * FROM @t t1
CROSS APPLY(SELECT TOP 1 v FROM @t t2 WHERE t2.v > t1.v ORDER BY v) ca
WHERE RIGHT(t1.v, 7) <> RIGHT(ca.v, 7) - 1
Output:
v v
AAA0000002 AAA0000007
AAA0000008 AAA0000010
In SQL Server 2012, you can use LAG and LEAD:
DECLARE @t table(col1 varchar(15))
INSERT @t values('AAA0000001'),('AAA0000002'),('AAA0000004')
SELECT
case when
stuff(lag(col1) over (order by col1), 1,3,'') + 1
= stuff(col1, 1,3,'') then 'Yes' else 'No' end previous_exists,
case when
stuff(lead(col1) over (order by col1), 1,3,'') - 1
= stuff(col1, 1,3,'') then 'Yes' else 'No' end next_exists,
col1
FROM @t
Result:
previous_exists next_exists col1
No Yes AAA0000001
Yes No AAA0000002
No No AAA0000004
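Building on the same idea, a sketch (reusing the @t sample above) that returns only the gap ranges instead of per-row flags:
;WITH nums AS
(
    SELECT CAST(STUFF(col1, 1, 3, '') AS int) AS num,
           CAST(STUFF(LEAD(col1) OVER (ORDER BY col1), 1, 3, '') AS int) AS next_num
    FROM @t
)
SELECT num + 1 AS gap_start, next_num - 1 AS gap_end
FROM nums
WHERE next_num > num + 1;
-- for the sample above this returns one row: gap_start = 3, gap_end = 3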
I have exhausted my search for a solution and would like to post my question to see if a solution exists.
I need to write a report to show the debits and credits per branch. The report also needs to show branches that have had no DBs or CRs.
For simplicity I have scaled down my tables to try highlight my issue.
My first table holds my Branch Data
BranchNo BranchName
1 Main
2 Mgorogoro
3 Arusha
My second table holds all Debit Transactions
txid Narrative Amount Date BranchNo
1 Test 1 50.00 2014/11/26 1
2 Test 2 20.00 2014/11/27 3
I've written a SQL statement that gives me the results I need:-
DECLARE @get_Dates CURSOR;
DECLARE @Date VarChar(10);
DECLARE @tbl TABLE
(
DebitOutCount int,
BranchCode VarChar(250),
TxDate VarChar(10)
)
--DECLARE @tbl TABLE(Idx1 VarChar(50), Idx8 VarChar(50), Idx3 VarChar(50))
SET @get_Dates = CURSOR FOR
Select Debits_OUT.Date FROM Debits_OUT GROUP BY Debits_OUT.Date ORDER BY Debits_OUT.Date
OPEN @get_Dates;
FETCH NEXT FROM @get_Dates into @Date;
WHILE (@@FETCH_STATUS = 0)
BEGIN
--INSERT INTO @tbl SELECT Idx1, Idx8, Idx3 FROM SessionDailyControl WHERE Idx1 = @sessionId
INSERT INTO @tbl
SELECT
(SELECT ISNULL(SUM(DB_OUT.Amount), 0) FROM Debits_OUT AS DB_OUT WHERE B.BranchNo = DB_OUT.BranchNo AND DB_OUT.Date = @Date) AS DebitOutValue,
CAST(B.BranchNo As VarChar(10)) + ' ' + B.BranchName As [Branch Names],
@Date
From exBranches As B
FETCH NEXT FROM @get_Dates into @Date
END
CLOSE @get_Dates
DEALLOCATE @get_Dates
SELECT * FROM @tbl
The result is in the format that I need:-
DebitOutCount BranchCode TxDate
50 1 Main 2014/11/26
0 2 Mgorogoro 2014/11/26
0 3 Arusha 2014/11/26
0 1 Main 2014/11/27
0 2 Mgorogoro 2014/11/27
20 3 Arusha 2014/11/27
However, the report tools and views cannot work with the above. I have tried LEFT JOINs, but the problem is that the result set will not keep the branch numbers for dates where there were zero transactions. For example:-
SELECT
ISNULL(SUM(B.Amount), 0),
CAST(A.BranchNo As VarChar(10)) + ' ' + A.BranchName As [Branch Names],
B.Date
From exBranches As A
LEFT JOIN Debits_OUT AS B ON A.BranchNo = B.BranchNo
GROUP BY B.Date, A.BranchNo, A.BranchName
ORDER BY B.Date, A.BranchNo, A.BranchName
Returns:-
DB_OUT Branch Names Date
0.00 2 Mgorogoro NULL
50.00 1 Main 2014/11/26
20.00 3 Arusha 2014/11/27
In all the JOIN combinations that I try, I cannot get the branches to show ALL the branches for each date that is in the debits table.
Is there a fundamental concept that I have completely missed? I need to have a query that can be run in a view and that returns the same data as the cursor statement. Is this possible?
The idea is to generate all possible combinations of branches and dates first:
create table exBranches(
BranchNo int,
BranchName varchar(20)
)
create table Debits_OUT(
txId int,
Narrative varchar(20),
Amount decimal (6,2),
[Date] date,
BranchNo int
)
insert into exBranches values (1, 'Main'), (2, 'Mgorogoro'), (3, 'Arusha')
insert into Debits_OUT values (1, 'Test 1', 50.00, '20141126', 1), (2, 'Test 2', 20.00, '20141127', 3);
with BranchDate as(
select
b.BranchNo,
b.BranchName,
d.Date
from exBranches b
cross join (
select distinct [Date] from Debits_OUT
)d
)
select
isnull(DebitOutCount,0),
cast(b.BranchNo as varchar(10)) + ' ' + b.BranchName as BranchName,
b.Date
from BranchDate b
left join (
select
branchNo,
[Date],
sum(Amount) as DebitOutCount
from Debits_OUT
group by
BranchNo, [Date]
)d
on d.BranchNo = b.BranchNo
and d.Date = b.Date
order by b.date, b.BranchNo asc
drop table exBranches
drop table Debits_OUT
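Since the question asks for something that can be run in a view, the same logic (minus the demo DDL and the ORDER BY, which moves to the query that reads the view) can be wrapped up; a sketch, with vw_BranchDebitsPerDate as an example name:
create view dbo.vw_BranchDebitsPerDate
as
with BranchDate as(
    select b.BranchNo, b.BranchName, d.[Date]
    from exBranches b
    cross join (select distinct [Date] from Debits_OUT) d
)
select
    isnull(d.DebitOutCount, 0) as DebitOutCount,
    cast(b.BranchNo as varchar(10)) + ' ' + b.BranchName as BranchName,
    b.[Date] as TxDate
from BranchDate b
left join (
    select BranchNo, [Date], sum(Amount) as DebitOutCount
    from Debits_OUT
    group by BranchNo, [Date]
) d on d.BranchNo = b.BranchNo
   and d.[Date] = b.[Date];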
Try this, it works:
select BranchName,amount,date1,BranchNo into #temp from exBranches
cross join (select distinct date1,amount from Debits_OUT)a
select isnull(t.amount,0),a.BranchName,a.date1 from #temp a
left join Debits_OUT t on t.BNo=a.BranchNo and a.date1=t.date1
order by date1
View the SQL Fiddle here:
http://sqlfiddle.com/#!3/ad815/1
How could I put these multiple rows onto one line, with the contents in different columns?
From:
ID | Subject1/Catalog/Session
10868952 | NUR/3110/D507
10868952 | NUR/3110/D512
10868952 | NUR/4010/D523
10868952 | NUR/4010/HD20
To
ID |Subject1/Catalog/Session |Subject2/Catalog/Session | Subject3/Catalog/Session |Subject4/Catalog/Session | Subject5/Catalog/Session
10868952 |NUR/3110/D507 | NUR/3110/D512 | NUR/4010/D523 | NUR/4010/HD20 |
It would be best if, in the future, you could provide DDL and sample data. I did this for you this time.
Here is how you could do this if you know the number of elements per row. I put links in the comments of the original post to both the static and dynamic versions of this type of approach.
if OBJECT_ID('tempdb..#Something') is not null
drop table #Something
create table #Something
(
ID int,
Subject1 varchar(50)
)
insert #Something
select 10868952, 'NUR/3110/D507' union all
select 10868952, 'NUR/3110/D512' union all
select 10868952, 'NUR/4010/D523' union all
select 10868952, 'NUR/4010/HD20';
with OrderedResults as
(
select *, ROW_NUMBER() over(partition by ID order by Subject1) as RowNum
from #Something
)
select ID
, MAX(Case when RowNum = 1 then Subject1 end) as Subject1
, MAX(Case when RowNum = 2 then Subject1 end) as Subject2
, MAX(Case when RowNum = 3 then Subject1 end) as Subject3
, MAX(Case when RowNum = 4 then Subject1 end) as Subject4
from OrderedResults
group by ID
Here is how you can do this as a dynamic pivot. There are a number of concepts going on here. One is a tally table. In this code it is implemented as a CTE; in my actual system I have it as a view. It generates 10,000 rows with zero reads. The tally table and most of the other concepts here were learned from the immortal Jeff Moden. If you do not know what a tally table is or how it works, check out Jeff's article here. http://www.sqlservercentral.com/articles/T-SQL/62867/
I will post some code for how to do this for this example but anybody who is unfamiliar with this technique should read his article. http://www.sqlservercentral.com/articles/Crosstab/65048/
Here is a full working example of doing this as a dynamic cross tab. When you are satisfied that the SQL this generates is safe, feel free to uncomment the last two lines.
Last but certainly not least: make sure that you fully understand what this code does and how it works. It is not going to be my phone that rings at 3 AM when something goes wrong; you are the one who will have to be there to support this code.
if OBJECT_ID('Something') is not null
drop table Something
create table Something
(
ID int,
Subject1 varchar(50)
)
insert Something
select 10868952, 'NUR/3110/D507' union all
select 10868952, 'NUR/3110/D512' union all
select 10868952, 'NUR/4010/D523' union all
select 10868952, 'NUR/4010/HD20' union all
select 12345, 'asdfasdf'
declare @MaxCols int
declare @StaticPortion nvarchar(2000) =
'with OrderedResults as
(
select *, ROW_NUMBER() over(partition by ID order by Subject1) as RowNum
from Something
)
select ID';
declare @DynamicPortion nvarchar(max) = '';
declare @FinalStaticPortion nvarchar(2000) = ' from OrderedResults Group by ID order by ID';
with E1(N) AS (select 1 from (values (1),(1),(1),(1),(1),(1),(1),(1),(1),(1))dt(n)),
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS
(
SELECT ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
)
select @DynamicPortion = @DynamicPortion +
', MAX(Case when RowNum = ' + CAST(N as varchar(6)) + ' then Subject1 end) as Subject' + CAST(N as varchar(6)) + CHAR(10)
from cteTally t
where t.N <=
(
select top 1 Count(*)
from Something
group by ID
order by COUNT(*) desc
)
select @StaticPortion + @DynamicPortion + @FinalStaticPortion
--declare @SqlToExecute nvarchar(max) = @StaticPortion + @DynamicPortion + @FinalStaticPortion;
--exec sp_executesql @SqlToExecute