I have some data in SQL Server:
test1^test2^test3^test4
test5
test6^test7
null
Desired output:
test4
test5
test7
null
You can use right() with charindex() & reverse():
select *, right(col, charindex('^', reverse(col) + '^')-1)
from ( values ('test1^test2^test3^test4'), ('test5'), ('test6^test7'), (null)
) t(col);
One method would be to split the value using DelimitedSplit8k_LEAD and then take the "last" item (by ordering ItemNumber from high to low). This version uses TOP 1 WITH TIES, since the order of the output rows seems to be irrelevant here:
WITH VTE AS (
    SELECT Col
    FROM (VALUES ('test1^test2^test3^test4'),
                 ('test5'),
                 ('test6^test7'),
                 ('test9^test10'),
                 (null)) V(Col))
SELECT TOP 1 WITH TIES
       V.Col,
       DS.Item
FROM VTE V
CROSS APPLY dbo.DelimitedSplit8k_LEAD(V.Col, '^') DS
ORDER BY ROW_NUMBER() OVER (PARTITION BY V.Col ORDER BY DS.ItemNumber DESC);
This avoids what can be an expensive REVERSE.
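Note that DelimitedSplit8k_LEAD is a community splitter function (the LEAD-based revision of Jeff Moden's DelimitedSplit8K published on SQLServerCentral), so it has to be created in the database first; the ItemNumber column it returns is what the ORDER BY above relies on.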
Using charindex and reverse:
I left all the intermediate expressions in the output so you can see the process.
declare @t table (test varchar(100));

insert into @t
values
 ('test1^test2^test3^test4')
,('test5')
,('test6^test7')
,(null);

select test
      ,revTest
      ,charInd
      ,reverse(left(revTest, case when charInd = 0 then len(revTest) else charInd - 1 end)) as [Result]
from @t t
cross apply (select reverse(rtrim(ltrim(test)))) a(revTest)
cross apply (select charindex('^', revTest)) b(charInd)
Results:
test                     revTest                  charInd  Result
-----------------------  -----------------------  -------  ------
test1^test2^test3^test4  4tset^3tset^2tset^1tset  6        test4
test5                    5tset                    0        test5
test6^test7              7tset^6tset              6        test7
NULL                     NULL                     NULL     NULL
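The CASE guard is what keeps single values intact: for 'test5' the reversed string contains no '^', so CHARINDEX returns 0 and the whole string is taken; without the guard, LEFT(revTest, -1) would raise an "invalid length" error.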
Try this:
declare @col varchar(500), @reveCol varchar(500);

set @col = 'test133^test2^test3^test4';
set @reveCol = reverse(@col);                         -- '4tset^3tset^2tset^331tset'

select charindex('^', @reveCol);                      -- 6
select right(@col, charindex('^', @reveCol) - 1);     -- test4
Assuming you're on SQL Server 2016+
Also assuming this schema and data:
DROP TABLE IF EXISTS #Data;
CREATE TABLE #Data(StringValue NVARCHAR(MAX));
INSERT INTO #Data(StringValue)VALUES
('test1^test2^test3^test4')
,('test5')
,('test6^test7')
,(NULL)
;
The query:
;WITH cte AS (
SELECT '["' + REPLACE(COALESCE(d.StringValue,''),'^','","') + '"]' AS [JSONValue]
,ROW_NUMBER()OVER(ORDER BY d.StringValue) AS [ID]
FROM #Data d
)
SELECT NULLIF(a.value,'') AS [Value]
FROM (
SELECT j.value,ROW_NUMBER()OVER(PARTITION BY c.ID ORDER BY j.[key] DESC) AS [rn]
FROM cte AS c
CROSS APPLY OPENJSON(c.JSONValue) j
) a
WHERE a.rn = 1
;
If you're on SQL Server 2017+
SELECT MAX(CASE WHEN d.StringValue LIKE '%'+s.value THEN s.[value] ELSE NULL END) AS [Result]
FROM ( VALUES
('test1^test2^test3^test4')
,('test5')
,('test6^test7')
,(NULL)
) d(StringValue)
CROSS APPLY STRING_SPLIT(COALESCE(d.StringValue,''),'^') s
GROUP BY d.StringValue
;
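If you're on SQL Server 2022+ (or Azure SQL Database), STRING_SPLIT also accepts a third enable_ordinal argument, which makes "take the last element" direct. A minimal sketch against the same sample rows:
SELECT d.StringValue,
       (SELECT TOP (1) s.value
        FROM STRING_SPLIT(d.StringValue, '^', 1) s   -- 1 = enable_ordinal
        ORDER BY s.ordinal DESC) AS [Result]
FROM ( VALUES
    ('test1^test2^test3^test4')
   ,('test5')
   ,('test6^test7')
   ,(NULL)
) d(StringValue);
The NULL row falls out naturally: STRING_SPLIT over a NULL input produces no rows, so the correlated subquery returns NULL.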
I am creating a CTE because I need all the InquiryId values in a table variable so I can process them in a WHILE loop.
I am trying to fill the variable via a CTE like below:
DECLARE @inqIdsToClose TABLE (InquiryId int)
With CTE (InquiryId,InquirySubject,CreateDt,SendTo,[From],SendBy) as
(
Select I.InquiryId,I.InquirySubject,I.CreateDt,I.SendTo,I.[From],U.Email as SendBy
From Inquiries I
Inner Join Users U
ON I.[From] = U.UserID
Where I.InquiryId Not In (Select InquiryId from InquiryReply)
And I.InquiryStatusId <> 5
And DATEDIFF(day, I.CreateDt, getdate()) >=7
)
Insert into @inqIdsToClose
Select InquiryId from CTE
Print @inqIdsToClose;
But SQL Server is not allowing me to fill @inqIdsToClose.
The CTE returns multiple InquiryId values, which I will later use in a WHILE loop.
You were missing the statement terminator (semicolon) after you declared your table variable.
Example
DECLARE @inqIdsToClose TABLE (InquiryId int);

with cte as (
    -- Dummy query
    Select SomeVal = 1
    Union All
    Select SomeVal = 2
)
Insert Into @inqIdsToClose Select SomeVal from cte

Select * from @inqIdsToClose
Returns
InquiryId
1
2
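As a side note, PRINT can't output a table variable (it only accepts a string), so to inspect @inqIdsToClose you SELECT from it, as above. If the ids then drive a WHILE loop, one common pattern looks like this (just a sketch; @currentId is a name I've made up, not from the question):
DECLARE @currentId int;

WHILE EXISTS (SELECT 1 FROM @inqIdsToClose)
BEGIN
    SELECT TOP (1) @currentId = InquiryId FROM @inqIdsToClose ORDER BY InquiryId;

    -- ... per-inquiry work goes here ...

    DELETE FROM @inqIdsToClose WHERE InquiryId = @currentId;
END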
So the wonderful people here on Stack Overflow helped me with a "find consecutive failures" type query (Status = 4 is a failure). I thought I had cracked the second part of my problem because my test case seems to work fine, but whenever I run it on our test environment I get dodgy results, so I must be doing something wrong. The goal is to find X number of consecutive failures; the below is set to find 2 consecutive failures. I'm using SQL Server 2008 R2.
DECLARE @t TABLE (
    [InstructionId]       INT,
    [InstructionDetailId] INT,
    [Sequence]            INT,
    [Status]              INT
)

INSERT INTO @t ([InstructionId], [InstructionDetailId], [Sequence], [Status])
VALUES
 (222,111,1,2), (222,112,2,2), (222,113,3,4), (222,114,4,4), (222,115,5,2),
 (222,116,6,4), (222,117,7,2), (222,118,8,4), (222,119,9,4), (222,120,10,2),
 (222,121,11,2), (222,124,12,4), (222,126,13,4), (222,128,14,4),
 (223,126,13,4), (223,128,14,4), (223,129,15,2), (223,130,16,4),
 (224,111,17,4), (224,112,18,4),
 (223,160,33,4), (223,161,34,4), (223,162,35,4), (223,163,40,4);
;with HardcoreCTE AS
(
    select t.*,
           t.[Sequence] - ROW_NUMBER() OVER(PARTITION BY t.InstructionId ORDER BY t.InstructionDetailId) AS ItemCount
    from @t t
    outer apply ( select top (1) t1.*
                  from @t t1
                  where t1.InstructionId = t.InstructionId
                    and t1.Sequence < t.Sequence
                  order by t1.Sequence desc
                ) t1
    outer apply ( select top (1) t2.*
                  from @t t2
                  where t2.InstructionId = t.InstructionId
                    and t2.Sequence > t.Sequence
                  order by t2.Sequence
                ) t2
    where t.Status = 4 and (t.Status = t1.Status or t.Status = t2.Status)
),
HardCoreCTE2 AS
(
    select *, Count(1) OVER(PARTITION BY ItemCount) AS ItemCount2
    from HardcoreCTE
)
select * from HardCoreCTE2
where ItemCount2 = 2
So the above works brilliantly to find results where there are specifically only 2 consecutive failures, with these results:
Now from the above results the only ones it finds are the records where there are 2 consecutive failures, but whenever I convert the above to the actual test environment tables it doesn't seem to work.
Test Env Results: As you can see, for InstructionId 2518380 it brought back one record, and the same for InstructionId 2614351. It's meant to bring back sets of 2 records.
Test Env Query (pretty much identical):
;with InitialDataCTE AS
(
    SELECT Instruction.InstructionID,
           InstructionDetail.InstructionDetailID,
           InstructionDetail.InstructionDetailStatusID AS [Status],
           InstructionDetail.Sequence
    FROM Instruction
    INNER JOIN InstructionDetail
        ON Instruction.InstructionID = InstructionDetail.InstructionID
    where InstructionDetailStatusID = 4
      and InstructionDetail.PaymentDateOriginal between '2015-01-05' AND '2018-09-08'
),
HardCoreCTE AS
(
    select t.*,
           t.Sequence - ROW_NUMBER() OVER(PARTITION BY t.InstructionID ORDER BY t.InstructionDetailID) AS ItemCount
    from InitialDataCTE t
    outer apply ( select top (1) t1.*
                  from InitialDataCTE t1
                  where t1.InstructionID = t.InstructionID
                    and t1.Sequence < t.Sequence
                  order by t1.Sequence desc
                ) t1
    outer apply ( select top (1) t2.*
                  from InitialDataCTE t2
                  where t2.InstructionID = t.InstructionID
                    and t2.Sequence > t.Sequence
                  order by t2.Sequence
                ) t2
    where t.Status = 4 and (t.Status = t1.Status or t.Status = t2.Status)
),
HardCoreCTE2 AS
(
    select *, Count(1) OVER(PARTITION BY ItemCount) AS ItemCount2
    from HardCoreCTE
)
select * from HardCoreCTE2
where ItemCount2 = 2
order by InstructionID, Sequence
I'd really appreciate it if someone could tell me where I'm going wrong; I've been messing around with variations of the COUNT(*) but nothing has worked yet. Thanks a lot.
I came up with the following query:
with
a as (
select *,
row_number() over(partition by InstructionId order by Sequence)-
row_number() over(partition by InstructionId, [Status] order by Sequence) g
from @t
),
b as (
select *,
count(*) over(partition by InstructionId, [Status], g) c
from a
where [Status] = 4
)
select *
from b
where c > 2
order by 1, 3;
For your test data, I got the following result:
InstructionId InstructionDetailId Sequence Status g c
222 224 312 4 6 3
222 226 413 4 6 3
222 228 514 4 6 3
223 161 84 4 2 3
223 162 95 4 2 3
223 163 140 4 2 3
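The key to this approach is gaps-and-islands: the difference of the two ROW_NUMBER() values (g) stays constant within every unbroken run of the same Status per InstructionId, so counting over (InstructionId, Status, g) gives the length of each run. If you want the threshold as a parameter rather than a literal, a minimal sketch (the @MinRun name is mine, not from the question):
declare @MinRun int = 2;   -- number of consecutive failures to look for

with a as (
    select *,
           row_number() over(partition by InstructionId order by Sequence)
         - row_number() over(partition by InstructionId, [Status] order by Sequence) as g
    from @t
),
b as (
    select *, count(*) over(partition by InstructionId, [Status], g) as c
    from a
    where [Status] = 4
)
select *
from b
where c >= @MinRun         -- runs of at least @MinRun consecutive failures
order by InstructionId, Sequence;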
Hi, I have a SQL Server table in which one column holds comma-separated values:
12323,234322,1112,99323.....
And I have a parameter @values nvarchar(500) that will also hold comma-separated values.
In my query I need to check whether anything from the parameter exists in my table field.
Something like this:
...
WHERE
(@values = '' OR select s from dbo.Split(',',t.Data) in ( select s from dbo.Split(',',@values )))
Of course the above gives me errors.
Any clue?
Join both tables that you got out of the split
SELECT *
...
FROM (SELECT s FROM dbo.Split(',',t.Data)) X
INNER JOIN (SELECT s FROM dbo.Split(',',@values)) Y
ON X.s = Y.s
...
EXISTS is your friend here.
WHERE
(@values = '' OR EXISTS (select a.value
                         from string_split(t.Data, ',') a
                         inner join (select value from string_split(@values, ',')) b
                             on a.value = b.value))
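STRING_SPLIT needs SQL Server 2016+ and database compatibility level 130; on older versions, substitute the dbo.Split function from the question. The join inside EXISTS can also be written with IN, which reads a little leaner (same assumptions):
WHERE
    @values = ''
    OR EXISTS (SELECT 1
               FROM string_split(t.Data, ',') a
               WHERE a.value IN (SELECT b.value FROM string_split(@values, ',') b))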
Try the code below; it may help you.
IF OBJECT_ID('Tempdb..#Temp') IS NOT NULL
Drop table #Temp
Declare @SearchVariable varchar(1000)='12323,234322,1112,99323,22222,4545,656565,8989,1111,22222' -- Variable contains the values to search
CREATE TABLE #Temp (CommaValue Varchar(100))-- This is the table having comma separted value columns
INSERT INTO #Temp
SELECT '12323,234322,1112,99323' Union all
SELECT '12323,656565,1112,4545'
Declare @VariableSearch TABLE (ValueName varchar(1000))
Insert into @VariableSearch
SELECT @SearchVariable
;With cte
AS
(
SELECT Split.a.value('.', 'VARCHAR(1000)') AS TablesData
FROM (
SELECT CAST('<S>' + REPLACE(CommaValue, ',', '</S><S>') + '</S>' AS XML) AS TablesData
FROM #Temp
) AS A
CROSS APPLY TablesData.nodes('/S') AS Split(a)
)
SELECT DISTINCT ROW_NUMBER()Over(Order by (SELECT 1)) AS Rno, * from cte C Inner join
(
SELECT Split.a.value('.', 'VARCHAR(1000)') AS VariableSeachData
FROM (
SELECT CAST('<S>' + REPLACE(ValueName, ',', '</S><S>') + '</S>' AS XML) AS VariableSeachData
FROM @VariableSearch
) AS A
CROSS APPLY VariableSeachData.nodes('/S') AS Split(a)
)DT
On C.TablesData=DT.VariableSeachData
Output:
Rno TablesData VariableSeachData
---------------------------------
1 1112 1112
2 1112 1112
3 12323 12323
4 12323 12323
5 234322 234322
6 4545 4545
7 656565 656565
8 99323 99323
Not quite sure, but maybe this can give you an idea, using OUTER APPLY and the EXISTS operator.
SELECT x.value
FROM Table T
OUTER APPLY ( SELECT value
              FROM dbo.Split(',', T.Data)
            ) x
WHERE EXISTS ( SELECT 1
               FROM dbo.Split(',', @values) s
               WHERE s.value = x.value )
I have the table described below, from which I need to select all rows with a [Value] greater by at least 5 points (for example) than the [Value] of the previously selected row (ordered by [Id]). Starting with the first row ([Id] 1), the desired output would be:
[Id] [Value]
---------------
1 1
4 12
8 21
Code:
declare @Data table
(
[Id] int not null identity(1, 1) primary key,
[Value] int not null
);
insert into @Data ([Value])
select 1 [Value]
union all
select 5
union all
select 3
union all
select 12
union all
select 8
union all
select 9
union all
select 16
union all
select 21;
select [t1].*
from @Data [t1];
Edit:
So, based on JNevill's and Hogan's answers, I ended up with this:
;with [cte1]
as (
select [t1].[Id],
[t1].[Value],
cast(1 as int) [rank]
from @Data [t1]
where [t1].[Id] = 1
union all
select [t2].[Id],
[t2].[Value],
cast(row_number() over (order by [t2].id) as int) [rank]
FROM [cte1] [t1]
inner join @Data [t2] on [t2].[value] - [t1].[value] > 5
and [t2].[Id] > [t1].[Id]
where [t1].[rank] = 1
)
select [t1].[Id],
[t1].[Value]
from [cte1] [t1]
where [t1].[rank] = 1;
which is working. Alan Burstein's answer is correct too (but applicable only on SQL Server 2012+, due to the LAG function). I will do some performance tests (I'm on the 2016 version) and see how it performs over my real data (approx. 30 million records).
If you are on 2012+ you can use LAG, which will provide a better-performing solution than a recursive CTE. I'm including your sample data so you can just copy/paste/test...
-- Your sample data
DECLARE @Data TABLE
(
Id int not null identity(1, 1) primary key,
Value int not null
);
insert into @Data ([Value])
select 1 [Value] union all select 5 union all select 3 union all select 12 union all
select 8 union all select 9 union all select 16 union all select 21;
-- Solution using window functions
WITH
prevRows AS
(
SELECT t1.Id, t1.Value, prevDiff = LAG(t1.Value, 1) OVER (ORDER BY t1.id) - t1.Value
FROM @Data t1
),
NewPrev AS
(
SELECT t1.Id, t1.Value, NewDiff = Value - LAG(t1.Value,1) OVER (ORDER BY t1.id)
FROM prevRows t1
WHERE prevDiff <= -5 OR prevDiff IS NULL
)
SELECT t1.Id, t1.Value
FROM NewPrev t1
WHERE NewDiff >= 5 OR NewDiff IS NULL;
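Against the sample data this returns Ids 1, 4 and 8 (values 1, 12 and 21), which matches the desired output:
Id  Value
--  -----
1   1
4   12
8   21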
I believe the best way to pull this off is a recursive CTE. A recursive CTE is a special type of CTE that refers back to itself, and it's made up of two parts:
1. The recursive seed/anchor, which establishes the beginning of the recursion. In your case, the record with ID = 1.
2. The recursive term/member, which is the statement that refers back to the CTE by name. Here it pulls through the next record whose value is more than 5 greater than the previously found record's value, taking the first such record by ID ascending.
Code:
WITH recCTE AS    -- SQL Server doesn't use the RECURSIVE keyword
(
    /* Recursive seed/anchor: select the first record */
    SELECT
        id,
        value,
        CAST(1 AS INT) AS [rank]
    FROM table
    WHERE id = 1

    UNION ALL

    /* Recursive term: find the next value that is more than 5 greater than the current value */
    SELECT
        table.id,
        table.value,
        CAST(ROW_NUMBER() OVER (ORDER BY table.id) AS INT)
    FROM recCTE
    INNER JOIN table
        ON  table.value - recCTE.value > 5
        AND table.id > recCTE.id
    WHERE recCTE.[rank] = 1
)
SELECT id, value FROM recCTE;
I've made use of the ROW_NUMBER() window function to rank the matching records by ID ascending. With the WHERE clause in the recursive term we only grab the first found record that is more than 5 greater than the previously found record, then head into the next recursive step.
You can do it with a recursive CTE. (The sketch below uses ANSI FETCH FIRST syntax; SQL Server doesn't allow ORDER BY/FETCH or TOP inside the recursive member, which is why the edit above rewrites the same idea with ROW_NUMBER.)
with find_values as
(
-- Find first value
SELECT Value
FROM #Table
ORDER BY ID ASC
FETCH FIRST 1 ROW ONLY
UNION ALL
-- Find next value
SELECT Value
FROM #Table
CROSS JOIN find_values
WHERE Value >= find_values.Value + 5
ORDER BY ID ASC
FETCH FIRST 1 ROW ONLY
)
SELECT *
FROM find_values
UNPIVOT will not return NULLs, but I need them in a comparison query. I am trying to avoid ISNULL as in the following example (because in the real SQL there are over 100 fields):
Select ID, theValue, column_name
From
(select ID,
ISNULL(CAST([TheColumnToCompare] AS VarChar(1000)), '') as TheColumnToCompare
from MyView
where The_Date = '04/30/2009'
) MA
UNPIVOT
(theValue FOR column_name IN
([TheColumnToCompare])
) AS unpvt
Any alternatives?
To preserve NULLs, use CROSS JOIN ... CASE:
select a.ID, b.column_name
, column_value =
case b.column_name
when 'col1' then a.col1
when 'col2' then a.col2
when 'col3' then a.col3
when 'col4' then a.col4
end
from (
select ID, col1, col2, col3, col4
from table1
) a
cross join (
select 'col1' union all
select 'col2' union all
select 'col3' union all
select 'col4'
) b (column_name)
Instead of:
select ID, column_name, column_value
From (
select ID, col1, col2, col3, col4
from table1
) a
unpivot (
column_value FOR column_name IN (
col1, col2, col3, col4)
) b
A text editor with column mode makes such queries easier to write. UltraEdit has it, so does Emacs. In Emacs it's called rectangular edit.
You might need to script it for 100 columns.
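For the "100 columns" case, the CASE branches and the column-name list can also be scripted from the catalog instead of typed by hand. A rough sketch, assuming SQL Server 2017+ for STRING_AGG; dbo.table1 and the ID key are just the placeholder names from the example above:
DECLARE @case nvarchar(max), @names nvarchar(max), @nl nvarchar(2) = nchar(10);

-- build the CASE branches and the VALUES list from the column metadata
SELECT @case  = STRING_AGG(CONVERT(nvarchar(max), 'when ''' + c.name + ''' then cast(a.' + QUOTENAME(c.name) + ' as varchar(1000))'), @nl),
       @names = STRING_AGG(CONVERT(nvarchar(max), '(''' + c.name + ''')'), ', ')
FROM sys.columns c
WHERE c.object_id = OBJECT_ID('dbo.table1')
  AND c.name <> 'ID';

-- print the generated query (or EXEC it); values are cast to varchar so the CASE has a single type
PRINT 'select a.ID, b.column_name,'
    + @nl + '       column_value = case b.column_name'
    + @nl + @case
    + @nl + 'end'
    + @nl + 'from dbo.table1 a'
    + @nl + 'cross join (values ' + @names + ') b (column_name);'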
It's a real pain. You have to switch them out before the UNPIVOT, because there is no row produced for ISNULL() to operate on; code generation is your friend here.
I have the same problem with PIVOT: missing rows turn into NULLs, which you then have to wrap in ISNULL() all the way across the row if, for example, missing values should be treated as 0.0.
I ran into the same problem. Using CROSS APPLY (SQL Server 2005 and later) instead of UNPIVOT solved it. I based the solution on the article "An Alternative (Better?) Method to UNPIVOT"
and made the following example to demonstrate that CROSS APPLY will NOT ignore NULLs the way UNPIVOT does.
create table #Orders (OrderDate datetime, product nvarchar(100), ItemsCount float, GrossAmount float, employee nvarchar(100))
insert into #Orders
select getutcdate(),'Windows',10,10.32,'Me'
union
select getutcdate(),'Office',31,21.23,'you'
union
select getutcdate(),'Office',31,55.45,'me'
union
select getutcdate(),'Windows',10,null,'You'
SELECT OrderDate, product,employee,Measure,MeasureType
from #Orders orders
CROSS APPLY (
VALUES ('ItemsCount',ItemsCount),('GrossAmount',GrossAmount)
)
x(MeasureType, Measure)
SELECT OrderDate, product,employee,Measure,MeasureType
from #Orders orders
UNPIVOT
(Measure FOR MeasureType IN
(ItemsCount,GrossAmount)
)AS unpvt;
drop table #Orders
or, in SQL Server 2008, a shorter way:
...
cross join
(values('col1'), ('col2'), ('col3'), ('col4')) column_names(column_name)
Using dynamic SQL and COALESCE, I solved the problem like this:
DECLARE @SQL NVARCHAR(MAX)
DECLARE @cols NVARCHAR(MAX)
DECLARE @dataCols NVARCHAR(MAX)

SELECT
    @dataCols = COALESCE(@dataCols + ', ' + 'ISNULL(' + Name + ',0) ' + Name, 'ISNULL(' + Name + ',0) ' + Name)
FROM Metric WITH (NOLOCK)
ORDER BY ID

SELECT
    @cols = COALESCE(@cols + ', ' + Name, Name)
FROM Metric WITH (NOLOCK)
ORDER BY ID

SET @SQL = 'SELECT ArchiveID, MetricDate, BoxID, GroupID, ID MetricID, MetricName, Value
FROM
    (SELECT ArchiveID, [Date] MetricDate, BoxID, GroupID, ' + @dataCols + '
     FROM MetricData WITH (NOLOCK)
     INNER JOIN Archive WITH (NOLOCK)
         ON ArchiveID = ID
     WHERE BoxID = ' + CONVERT(VARCHAR(40), @BoxID) + '
       AND GroupID = ' + CONVERT(VARCHAR(40), @GroupID) + ') p
UNPIVOT
    (Value FOR MetricName IN
    (' + @cols + ')
    ) AS unpvt
INNER JOIN Metric WITH (NOLOCK)
    ON MetricName = Name
ORDER BY MetricID, MetricDate'

EXECUTE( @SQL )
I've found left outer joining the UNPIVOT result to the full list of fields, conveniently pulled from INFORMATION_SCHEMA, to be a practical answer to this problem in some contexts.
-- test data
CREATE TABLE _t1(name varchar(20),object_id varchar(20),principal_id varchar(20),schema_id varchar(20),parent_object_id varchar(20),type varchar(20),type_desc varchar(20),create_date varchar(20),modify_date varchar(20),is_ms_shipped varchar(20),is_published varchar(20),is_schema_published varchar(20))
INSERT INTO _t1 SELECT 'blah1', 3, NULL, 4, 0, 'blah2', 'blah3', '20100402 16:59:23.267', NULL, 1, 0, 0
-- example
select c.COLUMN_NAME, Value
from INFORMATION_SCHEMA.COLUMNS c
left join (
select * from _t1
) q1
unpivot (Value for COLUMN_NAME in (name,object_id,principal_id,schema_id,parent_object_id,type,type_desc,create_date,modify_date,is_ms_shipped,is_published,is_schema_published)
) t on t.COLUMN_NAME = c.COLUMN_NAME
where c.TABLE_NAME = '_t1'
output looks like:
+----------------------+-----------------------+
| COLUMN_NAME | Value |
+----------------------+-----------------------+
| name | blah1 |
| object_id | 3 |
| principal_id | NULL | <======
| schema_id | 4 |
| parent_object_id | 0 |
| type | blah2 |
| type_desc | blah3 |
| create_date | 20100402 16:59:23.26 |
| modify_date | NULL | <======
| is_ms_shipped | 1 |
| is_published | 0 |
| is_schema_published | 0 |
+----------------------+-----------------------+
Writing in May 2022, tested on AWS Redshift.
You can use a WITH clause in which you COALESCE the columns where NULLs are expected. Alternatively, you can use COALESCE in the SELECT statement prior to the UNPIVOT block.
And don't forget to alias back to the original column name (not doing so won't break anything, but it will save you some time).
Select ID, theValue, column_name
From
(select ID,
coalesce(CAST([TheColumnToCompare] AS VarChar(1000)), '') as TheColumnToCompare
from MyView
where The_Date = '04/30/2009'
) MA
UNPIVOT
(theValue FOR column_name IN
([TheColumnToCompare])
) AS unpvt
OR
WITH TEMP1 as (
select ID,
coalesce(CAST([TheColumnToCompare] AS VarChar(1000)), '') as TheColumnToCompare
from MyView
where The_Date = '04/30/2009'
)
Select ID, theValue, column_name
From
(select ID, TheColumnToCompare
from TEMP1
) MA
UNPIVOT
(theValue FOR column_name IN
([TheColumnToCompare])
) AS unpvt
I had the same problem, and this is my quick and dirty solution:
Your query:
select
Month,Name,value
from TableName
unpivot
(
Value for Name in (Col_1,Col_2,Col_3,Col_4,Col_5
)
) u
Replace it with:
select Month,Name,value from
( select
isnull(Month,'no-data') as Month,
isnull(Name,'no-data') as Name,
isnull(value,'no-data') as value from TableName
) as T1
unpivot
(
Value
for Name in (Col_1,Col_2,Col_3,Col_4,Col_5)
) u
OK, the NULL values are replaced with a string, but all rows will be returned!
ISNULL is half the answer. Use NULLIF to translate back to NULL. E.g.
DECLARE @temp TABLE(
Foo varchar(50),
Bar varchar(50) NULL
);
INSERT INTO @temp( Foo,Bar )VALUES( 'licious',NULL );
SELECT * FROM @temp;
SELECT
Col,
NULLIF( Val,'0Null' ) AS Val
FROM(
SELECT
Foo,
ISNULL( Bar,'0Null' ) AS Bar
FROM
    @temp
) AS t
UNPIVOT(
Val FOR Col IN(
Foo,
Bar
)
) up;
Here I use "0Null" as my intermediate value. You can use anything you like; however, you risk collision with user input if you choose something real-world like "Null". Garbage such as "!##34())0" works fine but may be more confusing to future coders. I am sure you get the picture.