I have columns col1 and col2:
Col1 col2
abc,def xyz,xyz
abc1,def1 xyz1,xyz1
I need the output as below:
Col1 col2
abc xyz,xyz
def xyz,xyz
abc1 xyz1,xyz1
def1 xyz1,xyz1
That is, if col1 contains 2 values (abc,def) and col2 contains 2 values (xyz,xyz), then I need 4 rows. Likewise, if col1 and col2 each contain 3 values, then I need 9 rows.
Please help me get this output in SQL Server.
A little XML and a CROSS APPLY will do it.
Option 1: Without a Split/Parse Function
Declare @YourTable table (Col1 varchar(25),col2 varchar(25))
Insert Into @YourTable values
('abc,def','xyz,xyz'),
('abc1,def1','xyz1,xyz1')
Select col1 = B.RetVal
,col2 = A.col2
From @YourTable A
Cross Apply (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(A.Col1,',','§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as X
Cross Apply x.nodes('x') AS B(i)
) B
Returns
col1 col2
abc xyz,xyz
def xyz,xyz
abc1 xyz1,xyz1
def1 xyz1,xyz1
Option 2: with a Split/Parse Function
Select col1 = B.RetVal
,col2 = A.col2
From @YourTable A
Cross Apply [dbo].[udf-Str-Parse](A.col1,',') B
The UDF if interested
CREATE FUNCTION [dbo].[udf-Str-Parse] (@String varchar(max),@Delimiter varchar(10))
Returns Table
As
Return (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(@String,@Delimiter,'§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as X
Cross Apply x.nodes('x') AS B(i)
);
--Thanks Shnugo for making this XML safe
--Select * from [dbo].[udf-Str-Parse]('Dog,Cat,House,Car',',')
--Select * from [dbo].[udf-Str-Parse]('John Cappelletti was here',' ')
--Select * from [dbo].[udf-Str-Parse]('this,is,<test>,for,< & >',',')
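On SQL Server 2016 or later you could likely swap the XML trick for the built-in STRING_SPLIT, assuming you do not need a guaranteed ordering of the split values (STRING_SPLIT makes no ordering promise). A minimal sketch against the same @YourTable:
Select col1 = ltrim(rtrim(S.value))
      ,col2 = A.col2
From @YourTable A
Cross Apply string_split(A.Col1, ',') S   -- S is just an illustrative alias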
Here is another example:
;WITH A(Col1,col2)AS(
SELECT 'abc,def','xyz,xyz' UNION all
SELECT 'abc1,def1','xyz1,xyz1'
)
SELECT d.n,a.col2 FROM a
CROSS APPLY(VALUES(CONVERT(XML,'<n>'+REPLACE(col1,',','</n><n>')+'</n>'))) c(x)
CROSS APPLY(SELECT y.n.value('.','varchar(10)') FROM c.x.nodes('n') y(n)) d(n)
n col2
---------- ---------
abc xyz,xyz
def xyz,xyz
abc1 xyz1,xyz1
def1 xyz1,xyz1
Related
Hi, I have a SQL Server table where one column has comma-separated values:
12323,234322,1112,99323.....
And I have a parameter @values nvarchar(500) that will also have comma-separated values.
In my query I need to check if anything from the parameter exists in my table field.
Something like this:
...
WHERE
(@values = '' OR select s from dbo.Split(',',t.Data) in ( select s from dbo.Split(',',@values )))
Of course the above gives me errors.
Any clue?
Join both tables that you got out of the split
SELECT *
...
FROM (SELECT s FROM dbo.Split(',',t.Data)) X
INNER JOIN (SELECT s FROM dbo.Split(',',#values)) Y
ON X.s = Y.s
...
EXISTS is your friend here.
WHERE
(@values = '' OR EXISTS (select a.value from string_split(t.Data, ',') a inner join ( select value from string_split(@values, ',')) b ON a.value = b.value))
Try the code below; it may help you.
IF OBJECT_ID('Tempdb..#Temp') IS NOT NULL
Drop table #Temp
Declare @SearchVariable varchar(1000)='12323,234322,1112,99323,22222,4545,656565,8989,1111,22222' -- Variable contains the values to search
CREATE TABLE #Temp (CommaValue Varchar(100)) -- This is the table with the comma-separated value column
INSERT INTO #Temp
SELECT '12323,234322,1112,99323' Union all
SELECT '12323,656565,1112,4545'
Declare @VariableSearch TABLE (ValueName varchar(1000))
Insert into @VariableSearch
SELECT @SearchVariable
;With cte
AS
(
SELECT Split.a.value('.', 'VARCHAR(1000)') AS TablesData
FROM (
SELECT CAST('<S>' + REPLACE(CommaValue, ',', '</S><S>') + '</S>' AS XML) AS TablesData
FROM #Temp
) AS A
CROSS APPLY TablesData.nodes('/S') AS Split(a)
)
SELECT DISTINCT ROW_NUMBER()Over(Order by (SELECT 1)) AS Rno, * from cte C Inner join
(
SELECT Split.a.value('.', 'VARCHAR(1000)') AS VariableSearchData
FROM (
SELECT CAST('<S>' + REPLACE(ValueName, ',', '</S><S>') + '</S>' AS XML) AS VariableSearchData
FROM @VariableSearch
) AS A
CROSS APPLY VariableSearchData.nodes('/S') AS Split(a)
)DT
On C.TablesData=DT.VariableSearchData
Output
Rno TablesData VariableSearchData
---------------------------------
1 1112 1112
2 1112 1112
3 12323 12323
4 12323 12323
5 234322 234322
6 4545 4545
7 656565 656565
8 99323 99323
Not quite sure, but maybe this can give you an idea, using OUTER APPLY and the EXISTS operator.
SELECT x.value
FROM Table T
OUTER APPLY ( SELECT value
FROM dbo.Split(',', t.Data)
) X
WHERE EXISTS ( SELECT 1
FROM dbo.Split(',', @values) S
WHERE s.value = x.value )
Given json like this...
{"setting1":"A","setting2":"B","setting3":"C"}
I would like to see results like...
+----------+-------+
| name | value |
+----------+-------+
| setting1 | A |
| setting2 | B |
| setting3 | C |
+----------+-------+
My struggle is that I'm trying to figure out how to extract each key's name (i.e., "setting1", "setting2", "setting3", etc.).
I could do something like the following query, but I don't know how many settings there will be and what their names will be, so I'd like something more dynamic.
SELECT
B.name,
B.value
FROM OPENJSON(@json) WITH
(
setting1 varchar(50) '$.setting1',
setting2 varchar(50) '$.setting2',
setting3 varchar(50) '$.setting3'
) A
CROSS APPLY
(
VALUES
('setting1', A.setting1),
('setting2', A.setting2),
('setting3', A.setting3)
) B (name, value)
With XML, I could do something simple like this:
DECLARE @xml XML = '<settings><setting1>A</setting1><setting2>B</setting2><setting3>C</setting3></settings>'
SELECT
A.setting.value('local-name(.)', 'VARCHAR(50)') name,
A.setting.value('.', 'VARCHAR(50)') value
FROM @xml.nodes('settings/*') A (setting)
Any way to do something similar with SQL Server's json functionality?
Aaron Bertrand has written about getting JSON key/value pairs in Advanced JSON Techniques:
SELECT x.[Key], x.[Value]
FROM OPENJSON(@Json, '$') AS x;
Returns
Key Value
------------------
setting1 A
setting2 B
setting3 C
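For a self-contained run against the JSON from the question (the variable name @Json is just illustrative):
DECLARE @Json nvarchar(max) = '{"setting1":"A","setting2":"B","setting3":"C"}';
SELECT x.[Key], x.[Value]
FROM OPENJSON(@Json, '$') AS x;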
Option Using a Table
Declare @YourTable table (ID int,JSON_String varchar(max))
Insert Into @YourTable values
(1,'{"setting1":"A","setting2":"B","setting3":"C"}')
Select A.ID
,C.*
From @YourTable A
Cross Apply (values (try_convert(xml,replace(replace(replace(replace(replace(JSON_String,'"',''),'{','<row '),'}','"/>'),':','="'),',','" '))) ) B (XMLData)
Cross Apply (
Select Name = attr.value('local-name(.)','varchar(100)')
,Value = attr.value('.','varchar(max)')
From B.XMLData.nodes('/row') as C1(r)
Cross Apply C1.r.nodes('./#*') as C2(attr)
) C
Returns
ID Name Value
1 setting1 A
1 setting2 B
1 setting3 C
Option Using a String Variable
Declare @String varchar(max) = '{"setting1":"A","setting2":"B","setting3":"C"}'
Select C.*
From (values (try_convert(xml,replace(replace(replace(replace(replace(@String,'"',''),'{','<row '),'}','"/>'),':','="'),',','" '))) ) A (XMLData)
Cross Apply (
Select Name = attr.value('local-name(.)','varchar(100)')
,Value = attr.value('.','varchar(max)')
From A.XMLData.nodes('/row') as C1(r)
Cross Apply C1.r.nodes('./#*') as C2(attr)
) C
Returns
Name Value
setting1 A
setting2 B
setting3 C
If you are open to a TVF:
The following requires my Extract UDF. This function was created because I was tired of extracting strings with PATINDEX, CHARINDEX, LEFT, RIGHT, etc. It is a modified tally parser which accepts two different delimiters.
Example
Declare @YourTable table (ID int,JSON_String varchar(max))
Insert Into @YourTable values
(1,'{"setting1":{"global":"A","type":"1"},"setting2":{"global":"B","type":"1"},"setting3":{"global":"C","type":"1"}} ')
Select A.ID
,B.Setting
,C.*
From @YourTable A
Cross Apply (
Select Setting = replace(replace(B1.RetVal,'"',''),'{','')
,B2.RetVal
From [dbo].[udf-Str-Extract](A.JSON_String,',',':{') B1
Join [dbo].[udf-Str-Extract](A.JSON_String,':{','}') B2
on B1.RetSeq=B2.RetSeq
) B
Cross Apply (
Select Name = C1.RetVal
,Value = C2.RetVal
From [dbo].[udf-Str-Extract](','+B.RetVal,',"','":') C1
Join [dbo].[udf-Str-Extract](B.RetVal+',',':"','",') C2
on C1.RetSeq=C2.RetSeq
) C
Returns
ID Setting Name Value
1 setting1 global A
1 setting1 type 1
1 setting2 global B
1 setting2 type 1
1 setting3 global C
1 setting3 type 1
The UDF if Interested
CREATE FUNCTION [dbo].[udf-Str-Extract] (@String varchar(max),@Delimiter1 varchar(100),@Delimiter2 varchar(100))
Returns Table
As
Return (
with cte1(N) As (Select 1 From (Values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) N(N)),
cte2(N) As (Select Top (IsNull(DataLength(@String),0)) Row_Number() over (Order By (Select NULL)) From (Select N=1 From cte1 N1,cte1 N2,cte1 N3,cte1 N4,cte1 N5,cte1 N6) A ),
cte3(N) As (Select 1 Union All Select t.N+DataLength(@Delimiter1) From cte2 t Where Substring(@String,t.N,DataLength(@Delimiter1)) = @Delimiter1),
cte4(N,L) As (Select S.N,IsNull(NullIf(CharIndex(@Delimiter1,@String,s.N),0)-S.N,8000) From cte3 S)
Select RetSeq = Row_Number() over (Order By N)
,RetPos = N
,RetVal = left(RetVal,charindex(@Delimiter2,RetVal)-1)
From (
Select *,RetVal = Substring(@String, N, L)
From cte4
) A
Where charindex(@Delimiter2,RetVal)>1
)
/*
Max Length of String 1MM characters
Declare @String varchar(max) = 'Dear [[FirstName]] [[LastName]], ...'
Select * From [dbo].[udf-Str-Extract] (@String,'[[',']]')
*/
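As an aside, on SQL Server 2016 or later (database compatibility level 130+), the built-in OPENJSON can walk the nested objects directly, which may be simpler than the string extraction; a minimal sketch against the same @YourTable:
Select A.ID
      ,Setting = B.[key]      -- outer keys: setting1, setting2, ...
      ,Name    = C.[key]      -- inner keys: global, type
      ,Value   = C.[value]
From @YourTable A
Cross Apply OpenJson(A.JSON_String) B
Cross Apply OpenJson(B.[value]) C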
I understand that this answer here: How to extract hashtags from a string in T-SQL
explains how to extract hashtags from a declared string variable, but how do I apply this operation to an entire column of strings?
Using a CROSS APPLY. Just for fun, remove the final WHERE, and see what happens
Example
Declare @YourTable table (ID int,SomeText varchar(max))
Insert into @YourTable values
(1, '#want to extract all #hastag out of this string, #delhi #Traffic')
,(2, '#bunny #hastag #donetodeath')
Select A.ID
,B.*
From @YourTable A
Cross Apply (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(replace(A.SomeText,char(13),' '),' ','§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as A
Cross Apply x.nodes('x') AS B(i)
) B
Where B.RetVal like '#%'
Returns
ID RetSeq RetVal
1 1 #want
1 5 #hastag
1 10 #delhi
1 11 #Traffic
2 1 #bunny
2 2 #hastag
2 3 #donetodeath
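If you are on SQL Server 2016 or later, a sketch of the same idea with the built-in STRING_SPLIT (token order is not guaranteed, and the RetSeq column is dropped):
Select A.ID
      ,RetVal = S.value
From @YourTable A
Cross Apply string_split(replace(A.SomeText, char(13), ' '), ' ') S
Where S.value like '#%'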
I am looking for the cleanest SQL query to attain the following. Performance is not as important because my dataset is small.
Sample table:
Letter field holding: A, B, C, D, E,
Location field holding: UAE, CANADA, BOSTON, BAHRAIN, FRANCE
And I am looking for a result that lists every combination of a letter and a location containing that letter, i.e. the following result set:
A-UAE
A-CANADA
A-BAHRAIN
A-FRANCE
B-BOSTON
B-BAHRAIN
C-CANADA
C-FRANCE
D-CANADA
E-UAE
E-FRANCE
This is yet another solution:
DECLARE @Letter TABLE (
letter CHAR(1) PRIMARY KEY
);
DECLARE @Country TABLE (
name VARCHAR(100) PRIMARY KEY
);
INSERT INTO @Letter (letter)
VALUES ('A'), ('B'), ('C'), ('D'), ('E');
INSERT INTO @Country (name)
VALUES ('UAE'), ('CANADA'), ('BOSTON'), ('BAHRAIN'), ('FRANCE');
SELECT CONCAT(L.letter, ' - ', C.name)
FROM @Letter AS L
INNER JOIN @Country AS C
ON C.name LIKE '%' + L.letter + '%'
ORDER BY L.letter, C.name;
Result:
A - BAHRAIN
A - CANADA
A - FRANCE
A - UAE
B - BAHRAIN
B - BOSTON
C - CANADA
C - FRANCE
D - CANADA
E - FRANCE
E - UAE
Hopefully this outputs what you'd expect.
You can run this query on Stack Exchange Data: https://data.stackexchange.com/stackoverflow/query/622821
Alternatively, if performance becomes an issue, you could create a separate table storing each country name and its unique letters, so you could use a simple join instead of LIKE comparisons:
DECLARE @CountrySplit TABLE (
letter CHAR(1)
, name VARCHAR(100)
, PRIMARY KEY (letter, name)
);
INSERT INTO @CountrySplit (letter, name)
SELECT DISTINCT SUBSTRING(C.name, v.number + 1, 1), C.name
FROM @Country AS C
INNER JOIN master..spt_values AS V
ON V.number < LEN(C.name)
WHERE V.type = 'P';
SELECT CONCAT(L.letter, ' - ', CS.name) AS Result
FROM @CountrySplit AS CS
INNER JOIN @Letter AS L
ON L.letter = CS.letter;
This is query on Stack Exchange Data:
https://data.stackexchange.com/stackoverflow/query/622841
Credits to this answer for string split: T-SQL Split Word into characters
With the help of a Parse/Split UDF and a Cross Apply.
I added an ID to demonstrate that this can be run for the entire table
Example
Declare @YourTable table (ID int,Letter varchar(50),Location varchar(50))
Insert Into @YourTable values
(1,'A, B, C, D, E,','UAE, CANADA, BOSTON, BAHRAIN, FRANCE')
Select A.ID
,B.*
From @YourTable A
Cross Apply (
Select NewValue = B1.RetVal+'-'+B2.RetVal
From [dbo].[udf-Str-Parse](A.Letter,',') B1
Join [dbo].[udf-Str-Parse](A.Location,',') B2
on charindex(B1.RetVal,B2.RetVal)>0
) B
Returns
ID NewValue
1 A-UAE
1 A-CANADA
1 A-BAHRAIN
1 A-FRANCE
1 B-BOSTON
1 B-BAHRAIN
1 C-CANADA
1 C-FRANCE
1 D-CANADA
1 E-UAE
1 E-FRANCE
The UDF if needed
CREATE FUNCTION [dbo].[udf-Str-Parse] (@String varchar(max),@Delimiter varchar(10))
Returns Table
As
Return (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(@String,@Delimiter,'§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as A
Cross Apply x.nodes('x') AS B(i)
);
--Thanks Shnugo for making this XML safe
--Select * from [dbo].[udf-Str-Parse]('Dog,Cat,House,Car',',')
--Select * from [dbo].[udf-Str-Parse]('John Cappelletti was here',' ')
--Select * from [dbo].[udf-Str-Parse]('this,is,<test>,for,< & >',',')
EDIT - Option without a UDF
Declare @YourTable table (ID int,Letter varchar(50),Location varchar(50))
Insert Into @YourTable values
(1,'A, B, C, D, E,','UAE, CANADA, BOSTON, BAHRAIN, FRANCE')
Select A.ID
,B.*
From @YourTable A
Cross Apply (
Select NewValue = B1.RetVal+'-'+B2.RetVal
From (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(A.Letter,',','§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as A
Cross Apply x.nodes('x') AS B(i)
) B1
Join (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = LTrim(RTrim(B.i.value('(./text())[1]', 'varchar(max)')))
From (Select x = Cast('<x>' + replace((Select replace(A.Location,',','§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml).query('.')) as A
Cross Apply x.nodes('x') AS B(i)
) B2
on charindex(B1.RetVal,B2.RetVal)>0
) B
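And, for completeness, a sketch of the same join on SQL Server 2016 or later using the built-in STRING_SPLIT (values are trimmed because the sample data has spaces after the commas, and the empty token from the trailing comma is filtered out):
Select A.ID
      ,NewValue = ltrim(L.value) + '-' + ltrim(C.value)
From @YourTable A
Cross Apply string_split(A.Letter, ',') L
Cross Apply string_split(A.Location, ',') C
Where ltrim(L.value) <> ''
  and charindex(ltrim(L.value), C.value) > 0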
I'm aware that the "combine multiple rows into list" question has been answered a million times, and here's a reference to an awesome article: Concatenating row values in transact sql
I have a need to combine multiple rows into lists for multiple columns at the same time
ID | Col1 | Col2 ID | Col1 | Col2
------------------ => ------------------
1 A X 1 A X
2 B Y 2 B,C Y,Z
2 C Z
I tried to use the xml method, but this has proven to be very slow over large tables
SELECT DISTINCT
[ID],
[Col1] = STUFF((SELECT ',' + t2.[Col1]
FROM #Table t2
WHERE t2.ID = t.ID
FOR XML PATH(''), TYPE).value('.', 'nvarchar(max)'),1,1,''),
[Col2] = STUFF((SELECT ',' + t2.[Col2]
FROM #Table t2
WHERE t2.ID = t.ID
FOR XML PATH(''), TYPE).value('.', 'nvarchar(max)'),1,1,'')
FROM #Table t
My current solution is to use a stored procedure that builds each ID row separately. I'm wondering if there's another approach I could use (other than using a loop)
For each column, rank the rows to combine (partition by the key column)
End up with a table like
ID | Col1 | Col2 | Col1Rank | Col2Rank
1 A X 1 1
2 B Y 1 1
2 C Z 2 2
Create a new table containing top rank columns for each ID
ID | Col1Comb | Col2Comb
1 A X
2 B Y
Loop through each remaining rank in increasing order (in this case 1 iteration)
for irank = 0; irank <= 1; irank++
update n set
n.col1Comb = n.Col1Comb + ',' + o.Col1, -- so append the rank 2 items
n.col2comb = n.Col2Comb + ',' + o.Col2 -- if they are not null
from #newtable n
join #oldtable o
on o.ID = n.ID
where o.col1rank = irank or o.col2rank = irank
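A runnable sketch of that idea, assuming the sample data from the question and, for simplicity, a single shared rank per row (the #oldtable/#newtable/@irank names are illustrative):
IF OBJECT_ID('tempdb..#oldtable') IS NOT NULL DROP TABLE #oldtable;
IF OBJECT_ID('tempdb..#newtable') IS NOT NULL DROP TABLE #newtable;
-- rank the rows to combine, partitioned by the key column
SELECT ID, Col1, Col2,
       ColRank = ROW_NUMBER() OVER (PARTITION BY ID ORDER BY Col1, Col2)
INTO #oldtable
FROM (VALUES (1,'A','X'), (2,'B','Y'), (2,'C','Z')) v (ID, Col1, Col2);
-- seed the new table with the top-ranked row per ID
SELECT ID, Col1Comb = Col1, Col2Comb = Col2
INTO #newtable
FROM #oldtable
WHERE ColRank = 1;
-- loop through each remaining rank in increasing order, appending as we go
DECLARE @irank int = 2, @maxrank int = (SELECT MAX(ColRank) FROM #oldtable);
WHILE @irank <= @maxrank
BEGIN
    UPDATE n SET
        n.Col1Comb = n.Col1Comb + ',' + o.Col1,
        n.Col2Comb = n.Col2Comb + ',' + o.Col2
    FROM #newtable n
    JOIN #oldtable o ON o.ID = n.ID
    WHERE o.ColRank = @irank;
    SET @irank += 1;
END
SELECT * FROM #newtable;   -- 1 | A | X   and   2 | B,C | Y,Z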
A CTE trick can be used where you update the CTE.
Method 1: a new parallel table to which the data is copied and then concatenated:
CREATE TABLE #Table1(ID INT, Col1 VARCHAR(1), Col2 VARCHAR(1), RowID INT IDENTITY(1,1));
CREATE TABLE #Table1Concat(ID INT, Col3 VARCHAR(MAX), Col4 VARCHAR(MAX), RowID INT);
GO
INSERT #Table1 VALUES(1,'A','X'), (2,'B','Y'), (2,'C','Z');
GO
INSERT #Table1Concat
SELECT * FROM #Table1;
GO
DECLARE @Cat1 VARCHAR(MAX) = '';
DECLARE @Cat2 VARCHAR(MAX) = '';
; WITH CTE AS (
SELECT TOP 2147483647 t1.*, t2.Col3, t2.Col4, r = ROW_NUMBER()OVER(PARTITION BY t1.ID ORDER BY t1.Col1, t1.Col2)
FROM #Table1 t1
JOIN #Table1Concat t2 ON t1.RowID = t2.RowID
ORDER BY t1.ID, t1.Col1, t1.Col2
)
UPDATE CTE
SET @Cat1 = Col3 = CASE r WHEN 1 THEN ISNULL(Col1,'') ELSE @Cat1 + ',' + Col1 END
, @Cat2 = Col4 = CASE r WHEN 1 THEN ISNULL(Col2,'') ELSE @Cat2 + ',' + Col2 END;
GO
SELECT ID, Col3 = MAX(Col3)
, Col4 = MAX(Col4)
FROM #Table1Concat
GROUP BY ID
Method 2: Add the concatenation columns directly to the original table and concatenate the new columns:
CREATE TABLE #Table1(ID INT, Col1 VARCHAR(1), Col2 VARCHAR(1), Col1Cat VARCHAR(MAX), Col2Cat VARCHAR(MAX));
GO
INSERT #Table1(ID,Col1,Col2) VALUES(1,'A','X'), (2,'B','Y'), (2,'C','Z');
GO
DECLARE @Cat1 VARCHAR(MAX) = '';
DECLARE @Cat2 VARCHAR(MAX) = '';
; WITH CTE AS (
SELECT TOP 2147483647 t1.*, r = ROW_NUMBER()OVER(PARTITION BY t1.ID ORDER BY t1.Col1, t1.Col2)
FROM #Table1 t1
ORDER BY t1.ID, t1.Col1, t1.Col2
)
UPDATE CTE
SET @Cat1 = Col1Cat = CASE r WHEN 1 THEN ISNULL(Col1,'') ELSE @Cat1 + ',' + Col1 END
, @Cat2 = Col2Cat = CASE r WHEN 1 THEN ISNULL(Col2,'') ELSE @Cat2 + ',' + Col2 END;
GO
SELECT ID, Col1Cat = MAX(Col1Cat)
, Col2Cat = MAX(Col2Cat)
FROM #Table1
GROUP BY ID;
GO
Try this one -
Query1:
DECLARE @temp TABLE
(
ID INT
, Col1 VARCHAR(30)
, Col2 VARCHAR(30)
)
INSERT INTO @temp (ID, Col1, Col2)
VALUES
(1, 'A', 'X'),
(2, 'B', 'Y'),
(2, 'C', 'Z')
SELECT
r.ID
, Col1 = STUFF(REPLACE(REPLACE(CAST(d.x.query('/t1/a') AS VARCHAR(MAX)), '<a>', ','), '</a>', ''), 1, 1, '')
, Col2 = STUFF(REPLACE(REPLACE(CAST(d.x.query('/t2/a') AS VARCHAR(MAX)), '<a>', ','), '</a>', ''), 1, 1, '')
FROM (
SELECT DISTINCT ID
FROM @temp
) r
OUTER APPLY (
SELECT x = CAST((
SELECT
[t1/a] = t2.Col1
, [t2/a] = t2.Col2
FROM @temp t2
WHERE r.ID = t2.ID
FOR XML PATH('')
) AS XML)
) d
Query 2:
SELECT
r.ID
, Col1 = STUFF(REPLACE(CAST(d.x.query('for $a in /a return xs:string($a)') AS VARCHAR(MAX)), ' ,', ','), 1, 1, '')
, Col2 = STUFF(REPLACE(CAST(d.x.query('for $b in /b return xs:string($b)') AS VARCHAR(MAX)), ' ,', ','), 1, 1, '')
FROM (
SELECT DISTINCT ID
FROM @temp
) r
OUTER APPLY (
SELECT x = CAST((
SELECT
[a] = ',' + t2.Col1
, [b] = ',' + t2.Col2
FROM @temp t2
WHERE r.ID = t2.ID
FOR XML PATH('')
) AS XML)
) d
Output:
ID Col1 Col2
----------- ---------- ----------
1 A X
2 B,C Y,Z
One solution, and one that is at least syntactically straightforward, is to use a User-Defined Aggregate to "Join" the values together. This does require SQLCLR, and while some folks are reluctant to enable it, it provides a set-based approach that does not need to re-query the base table for each column. Joining is the opposite of splitting and creates a comma-separated list of what were individual rows.
Below is a simple example that uses the SQL# (SQLsharp) library, which comes with a User-Defined Aggregate named Agg_Join() that does exactly what is being asked for here. You can download the free version of SQL# from http://www.SQLsharp.com/; the example SELECTs from a standard system view. (And to be fair, I am the author of SQL#, but this function is available for free.)
SELECT sc.[object_id],
OBJECT_NAME(sc.[object_id]) AS [ObjectName],
SQL#.Agg_Join(sc.name) AS [ColumnNames],
SQL#.Agg_Join(DISTINCT sc.system_type_id) AS [DataTypes]
FROM sys.columns sc
GROUP BY sc.[object_id]
I recommend testing this against your current solution(s) to see which is the fastest for the volume of data you expect to have in at least the next year or two.
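If you are on SQL Server 2017 or later, the built-in STRING_AGG aggregate is another candidate worth including in such a test; a minimal self-contained sketch using the same sample rows (the table variable @t is illustrative):
DECLARE @t TABLE (ID INT, Col1 VARCHAR(30), Col2 VARCHAR(30));
INSERT INTO @t (ID, Col1, Col2) VALUES (1,'A','X'), (2,'B','Y'), (2,'C','Z');
SELECT ID
     , Col1 = STRING_AGG(Col1, ',')
     , Col2 = STRING_AGG(Col2, ',')
FROM @t
GROUP BY ID;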