I found some solutions for replacing #test.col2 (example below) with data from #test2.src. But in the result it just picks a single random value and uses it for every row. How can I fix this? Thanks!
#test (the target table)
col1 col2
-------------
A 1
B 2
C 3
D 4
E 5
#test2 (the source table)
LastName
-------------
src1
sample1
sample2
sample3
Query:
UPDATE #test
SET col1 = data1.LastName
FROM #test
CROSS APPLY
(SELECT TOP(1) #test2.LastName
FROM #test2
ORDER BY NEWID()) data1
Example result:
col1 col2
----------------
A sample2
B sample2
C sample2
D sample2
E sample2
The original query updates every row with the same value because the CROSS APPLY subquery does not reference the outer row, so SQL Server is free to evaluate it once and reuse the result. Here is one way to tackle this: it uses ROW_NUMBER in a CTE to "randomize" the values on both sides and then joins on that row number.
if OBJECT_ID('tempdb..#test') is not null
drop table #test;
create table #test
(
col1 varchar(20)
, col2 int
);
insert #test
select 'A', 1 union all
select 'B', 2 union all
select 'C', 3 union all
select 'D', 4 union all
select 'E', 5;
if OBJECT_ID('tempdb..#test2') is not null
drop table #test2;
create table #test2
(
LastName varchar(20)
);
insert #test2
select 'src1' union all
select 'sample1' union all
select 'sample2' union all
select 'sample3';
--here is the data before any updates
select * from #test;
with t1 as
(
select col1
, col2
, RowNum = ROW_NUMBER() over(order by newid())
from #test
)
, t2 as
(
select LastName
, RowNum = ROW_NUMBER() over(order by newid())
from #test2
)
update t
set col1 = t2.LastName
from t1
join t2 on t1.RowNum = t2.RowNum
join #test t on t.col1 = t1.col1
--we now have updated with a "random" row
select * from #test;
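One caveat: the join on RowNum only updates as many rows as the source table has (here four source values for five target rows, so one target row is left untouched). A minimal sketch of a wrap-around variant, assuming every target row should get a value, takes the target row number modulo the source row count:
--sketch: wrap the source rows around so every target row gets a value
with t1 as
(
    select col1
        , RowNum = ROW_NUMBER() over(order by newid())
    from #test
)
, t2 as
(
    select LastName
        , RowNum = ROW_NUMBER() over(order by newid())
        , Cnt = COUNT(*) over()     --number of source rows
    from #test2
)
update t
set col1 = t2.LastName
from t1
join t2 on ((t1.RowNum - 1) % t2.Cnt) + 1 = t2.RowNum
join #test t on t.col1 = t1.col1;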
I have a table, table1:
CREATE TABLE table1 ([Value] varchar(100))
INSERT INTO table1 values('XYZ')
INSERT INTO table1 values('ABC')
INSERT INTO table1 values('XYZ~ABC~AAA')
INSERT INTO table1 values('123~ABC')
Then I have the string 'ABC~XYZ~123'. I need to split this string into individual words using SQL:
Select VALUE FROM STRING_SPLIT('ABC~XYZ~123','~')
The result is table2:
ABC
XYZ
123
I want to count how many times each word in table2 appears in table1.
The expected output is
ABC|3
XYZ|2
123|1
Any ideas on this?
If I understand your case correctly, the following statement may help.
Text and table:
DECLARE @text varchar(100) = 'ABC~XYZ~123'
CREATE TABLE Data (
Id int,
[Text] varchar(100)
)
INSERT INTO Data
(Id, [Text])
VALUES
(1, 'XYZ'),
(2, 'ABC'),
(3, 'XYZ~ABC~AAA'),
(4, '123~ABC')
Statement:
SELECT t.[value] AS [Word], j.[Count]
FROM STRING_SPLIT(@text, '~') t
LEFT JOIN (
SELECT s.[value], COUNT(*) AS [Count]
FROM Data d
CROSS APPLY STRING_SPLIT(d.[Text], '~') s
GROUP BY s.[value]
) j ON t.[value] = j.[value]
Result:
-----------
Word Count
-----------
ABC 3
XYZ 2
123 1
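If a word from the search string does not occur in the table at all, the LEFT JOIN leaves its Count as NULL; a small tweak to the same statement (wrapping the count in ISNULL) returns 0 instead:
SELECT t.[value] AS [Word], ISNULL(j.[Count], 0) AS [Count]
FROM STRING_SPLIT(@text, '~') t
LEFT JOIN (
    SELECT s.[value], COUNT(*) AS [Count]
    FROM Data d
    CROSS APPLY STRING_SPLIT(d.[Text], '~') s
    GROUP BY s.[value]
) j ON t.[value] = j.[value]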
Apart from the suggestions in the comments, you can use the COUNT() function as below. But note that storing data in this delimited format will make extraction difficult, as well as joins with other tables.
Select T1Value, Count(*) as [NoCount] from
(
    Select table1.[Value] as T1Value, s.[value]
    FROM STRING_SPLIT('ABC~XYZ~123', '~') s
    inner join table1 on s.[value] = table1.[Value]
) a
group by T1Value
Edit
CREATE TABLE table1(
TableValue varchar(max)
);
INSERT INTO table1 (TableValue) values ( 'XYZ');
INSERT INTO table1 ( TableValue) values ( 'ABC');
INSERT INTO table1 ( TableValue) values ( 'XYZ~ABC~AAA');
INSERT INTO table1 ( TableValue) values ( '123~ABC');
SELECT b.value
,Count(*)
FROM (
SELECT VALUE
FROM STRING_SPLIT('ABC~XYZ~123', '~')
) a
INNER JOIN (
SELECT *
FROM table1
CROSS APPLY STRING_SPLIT(TableValue, '~')
) b ON a.Value = b.Value
GROUP BY b.Value
Here is a Live Demo on db<>fiddle.
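To illustrate the note above about delimited storage: a minimal sketch of a normalized layout (the table and column names here are hypothetical) keeps one word per row, so the count becomes a plain join with no splitting at query time:
-- hypothetical normalized layout: one word per row instead of '~'-delimited strings
CREATE TABLE table1_words (
    RowId int,
    Word varchar(100)
);
INSERT INTO table1_words (RowId, Word)
VALUES (1, 'XYZ'), (2, 'ABC'), (3, 'XYZ'), (3, 'ABC'), (3, 'AAA'), (4, '123'), (4, 'ABC');

SELECT s.[value] AS Word, COUNT(w.Word) AS [Count]
FROM STRING_SPLIT('ABC~XYZ~123', '~') s
LEFT JOIN table1_words w ON w.Word = s.[value]
GROUP BY s.[value];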
Setup
create table STRINGS (ID int, STRINGS varchar(max));
insert into STRINGS (ID, STRINGS) values (1, 'XYZ');
insert into STRINGS (ID, STRINGS) values (1, 'ABC');
insert into STRINGS (ID, STRINGS) values (1, 'XYZ~ABC~AAA');
insert into STRINGS (ID, STRINGS) values (1, '123~ABC');
declare @VALUES varchar(max) = 'XYZ~ABC~123';
Calculation:
select V1.VALUE, count(STRINGS.ID) as Num
from string_split(@VALUES, '~') V1
cross join STRINGS
outer apply string_split(STRINGS.STRINGS,'~') V2
where V2.VALUE = V1.VALUE
group by V1.VALUE
Result
-----------
Value Num
-----------
ABC 3
XYZ 2
123 1
Live example:
https://dbfiddle.uk/?rdbms=sqlserver_2017&fiddle=15b95efcf69ea98fafbb7dda1c624551
I have a table with two columns like this:
A 1
B 2
C 3
D 4
E 5
etc.
I want to get them into one column, with each column's data in alternate rows of the new column like this:
A
1
B
2
C
3
D
4
E
5
etc.
I would use a UNION ALL but here is the UNPIVOT alternative:
CREATE TABLE #Table1(letter VARCHAR(10),Id VARCHAR(10))
INSERT INTO #Table1(letter ,Id )
SELECT 'A',1 UNION ALL
SELECT 'B',2 UNION ALL
SELECT 'C',3 UNION ALL
SELECT 'D',4 UNION ALL
SELECT 'E',5
SELECT [value]
FROM #Table1
UNPIVOT
(
[value] FOR [Column] IN ([letter], [Id])
) UNPVT
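-- Note: without an ORDER BY the interleaved order above is not guaranteed. A sketch that
-- pins it down (a hypothetical variant, still reading from #Table1) carries a row number
-- through the UNPIVOT and orders on it:
SELECT [value]
FROM (
    SELECT letter, Id, ROW_NUMBER() OVER (ORDER BY letter) AS rn
    FROM #Table1
) s
UNPIVOT
(
    [value] FOR [Column] IN ([letter], [Id])
) u
ORDER BY rn, CASE [Column] WHEN 'letter' THEN 0 ELSE 1 END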
DROP TABLE #Table1;
The tricky part is getting the data into alternate rows:
select col2
from
( select col1, 1 as flag, col1 as col2 from tab
  union all
  select col1, 2, col2 from tab    -- cast col2 to varchar here if its type differs from col1
) as dt
order by col1, flag
But why are you trying to do this at all?
Try this (multiplying the row numbers by 2, with an offset for the first column, keeps the two columns interleaved when ordering by RN):
WITH CTE AS
(
    SELECT Col1Name AS Name, (ROW_NUMBER() OVER(ORDER BY (SELECT NULL))) * 2 - 1 AS RN
    FROM TableName
    UNION ALL
    SELECT Col2Name AS Name, (ROW_NUMBER() OVER(ORDER BY (SELECT NULL))) * 2 AS RN
    FROM TableName
)
SELECT Name
FROM CTE
ORDER BY RN
CREATE TABLE #Table1(Value VARCHAR(10),Id VARCHAR(10))
INSERT INTO #Table1(Value ,Id )
SELECT 'A',1 UNION ALL
SELECT 'B',2 UNION ALL
SELECT 'C',3 UNION ALL
SELECT 'D',4 UNION ALL
SELECT 'E',5
;WITH _CTE (Name) AS
(
SELECT Value [Name]
FROM #Table1
UNION ALL
SELECT Id [Name]
FROM #Table1
)
SELECT * FROM _CTE
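As written, that returns the letters and the Ids in no guaranteed order. To get the alternating layout from the question you need an interleaving sort key; a sketch reusing #Table1's Id column (assuming it stays numeric) could look like this:
;WITH _CTE (Name, SortKey) AS
(
    SELECT Value, CAST(Id AS int) * 2 - 1
    FROM #Table1
    UNION ALL
    SELECT Id, CAST(Id AS int) * 2
    FROM #Table1
)
SELECT Name
FROM _CTE
ORDER BY SortKey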
create table a ( col1 int, col2 int)
create table b (col1 int,col2 int)
insert b
select 1,2
union all   -- union all keeps both rows; plain union would collapse them into one
select 1,2
insert a
select 1,2
union
select 2,2
Expected output (I need to join the two tables and then get true for the first match, false for a second match, and also false when there is no match):
1,2,T
1,2,F
2,2,F
SELECT col1, col2,
    CASE WHEN (rownumber = 1 AND othercol IS NOT NULL)
         THEN 'T' ELSE 'F' END AS col3
FROM
(
    -- which matching row counts as the "first" is arbitrary here; add a tiebreaker
    -- column from b to the ORDER BY if you need a deterministic result
    SELECT a.col1, a.col2, b.col1 AS othercol,
           ROW_NUMBER() OVER (PARTITION BY a.col1, a.col2 ORDER BY a.col1, a.col2) AS rownumber
    FROM a
    LEFT JOIN b ON a.col1 = b.col1 AND a.col2 = b.col2
) t
I have two tables with the same number of rows.
Example:
table a:
1,A
2,B
3,C
table b:
AA,BB
AAA,BBB
AAAA,BBBB
I want a new table like this in SQL Server:
1,A,AA,BB
2,B,AAA,BBB
3,C,AAAA,BBBB
How do I do that?
In SQL Server 2005 (or newer), you can use something like this:
-- test data setup
DECLARE #tablea TABLE (ID INT, Val CHAR(1))
INSERT INTO #tablea VALUES(1, 'A'), (2, 'B'), (3, 'C')
DECLARE #tableb TABLE (Val1 VARCHAR(10), Val2 VARCHAR(10))
INSERT INTO #tableb VALUES('AA', 'BB'),('AAA', 'BBB'), ('AAAA', 'BBBB')
-- define CTE for table A - sort by "ID" (I just assumed this - adapt if needed)
;WITH DataFromTableA AS
(
SELECT ID, Val, ROW_NUMBER() OVER(ORDER BY ID) AS RN
FROM #tablea
),
-- define CTE for table B - sort by "Val1" (I just assumed this - adapt if needed)
DataFromTableB AS
(
SELECT Val1, Val2, ROW_NUMBER() OVER(ORDER BY Val1) AS RN
FROM #tableb
)
-- join the two CTEs; each one just selects the data from its table and adds a new
-- column "RN" that numbers the rows consecutively
SELECT
a.ID, a.Val, b.Val1, b.Val2
FROM
DataFromTableA a
INNER JOIN
DataFromTableB b ON a.RN = b.RN
This gives you the requested output:
ID  Val  Val1  Val2
--  ---  ----  ----
1   A    AA    BB
2   B    AAA   BBB
3   C    AAAA  BBBB
You could do a rank over the primary keys, then join on that rank:
SELECT T1.*, T2.*
FROM
(
    SELECT RANK() OVER (ORDER BY table1.primaryKey) AS [rank], table1.* FROM table1
) AS T1
JOIN
(
    SELECT RANK() OVER (ORDER BY table2.primaryKey) AS [rank], table2.* FROM table2
) AS T2 ON T1.[rank] = T2.[rank]
Your query is strange, but in Oracle you can do this:
select a.*, tb.*
from a
, ( select rownum rn, b.* from b ) tb -- temporary b - added rn column
where a.c1 = tb.rn -- assuming first column in a is called c1
If there is no column with numbers in a, you can do the same trick twice:
select ta.*, tb.*
from ( select rownum rn, a.* from a ) ta
, ( select rownum rn, b.* from b ) tb
where ta.rn = tb.rn
Note: be aware that this can generate random combinations, for example
1 A AA BB
2 C A B
3 B AAA BBB
because there is no ORDER BY in ta and tb.
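A way to avoid that, as a sketch: assign rownum only after ordering each table in a subquery. Here I assume a is ordered by c1 (as above) and b by a hypothetical column b1; adjust both to whatever actually defines the row order.
select ta.*, tb.*
from ( select rownum rn, x.* from ( select * from a order by c1 ) x ) ta
   , ( select rownum rn, y.* from ( select * from b order by b1 ) y ) tb
where ta.rn = tb.rn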
I have the following table:
CREATE TABLE TEST(ID TINYINT NULL, COL1 CHAR(1))
INSERT INTO TEST(ID,COL1) VALUES (1,'A')
INSERT INTO TEST(ID,COL1) VALUES (2,'B')
INSERT INTO TEST(ID,COL1) VALUES (1,'A')
INSERT INTO TEST(ID,COL1) VALUES (1,'B')
INSERT INTO TEST(ID,COL1) VALUES (1,'B')
INSERT INTO TEST(ID,COL1) VALUES (2,'B')
I would like to select duplicate rows from that table. How can I select them?
I tried the following:
SELECT TEST.ID,TEST.COL1
FROM TEST WHERE TEST.ID IN
(SELECT ID
FROM TEST WHERE TEST.COL1 IN
(SELECT COL1
FROM TEST WHERE TEST.ID IN
(SELECT ID
FROM TEST
GROUP BY ID
HAVING COUNT(*) > 1)
GROUP BY COL1
HAVING COUNT(*) > 1)
GROUP BY ID
HAVING COUNT(*) > 1)
Where's the error? What do I need to modify?
And I would like it to show as:
ID COL1
---- ----
1 A
1 A
1 B
1 B
(4 row(s) affected)
SELECT id, col1
FROM Test
GROUP BY id, col1
HAVING COUNT(*) > 1
When you use
SELECT id, col1, COUNT(*) AS cnt
FROM Test
GROUP BY id, col1
HAVING COUNT(*) > 1
you practically get every duplicated (id, col1) combination and how often it appears; either way, it does not return the individual duplicate rows.
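If you do want each duplicate row returned individually (as in the expected output), a sketch using a windowed COUNT over the same grouping columns looks like this:
SELECT id, col1
FROM (
    SELECT id, col1,
           COUNT(*) OVER (PARTITION BY id, col1) AS cnt   -- how many times this (id, col1) pair occurs
    FROM Test
) t
WHERE cnt > 1
ORDER BY id, col1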
A slower way would be:
SELECT id, col1
FROM Test T
WHERE (SELECT COUNT(*)
FROM Test I
WHERE I.id = T.id AND I.col1 = T.col1) > 1
Using SQL Server 2005+ and a CTE you could try:
;WITH Dups AS (
SELECT *,
ROW_NUMBER() OVER(PARTITION BY ID, Col1 ORDER BY ID, Col1) Rnum
FROM TEST t
)
SELECT *
FROM Dups
WHERE Rnum > 1
Or just a standard:
SELECT ID,
Col1,
COUNT(1) Cnt
FROM TEST
GROUP BY ID,
Col1
HAVING COUNT(1) > 1
EDIT:
To display the duplicate rows themselves:
SELECT t.*
FROM TEST t INNER JOIN
(
SELECT ID,
Col1,
COUNT(1) Cnt
FROM TEST
GROUP BY ID,
Col1
HAVING COUNT(1) > 1
) dups ON t.ID = dups.ID
AND t.Col1 = dups.Col1
Every row in that set of data is a duplicate;
select id, col1, count(*)
from test
group by id, col1
shows this. If you want to exclude the (2, B) rows, you need to do it explicitly, e.g.:
SELECT id, col1
FROM Test
WHERE NOT (id = 2 and col1 = 'B')
SELECT t.*
FROM TEST t
INNER JOIN (
SELECT ID,COL1
from test
GROUP BY ID,COL1
HAVING COUNT(*) > 1
)
AS t2
ON t2.ID = t.ID AND t2.COL1 =t.COL1
order by t.ID,t.COL1