How to query from a range? (SQL Server)

I have a table like this
CREATE TABLE Table1
([range] varchar(9), [sector] int)
;
INSERT INTO Table1
([range], [sector])
VALUES
('684-733', 2),
('563-598', 3),
('514-544', 2),
('640-682', 3),
('1053-1152', 2)
;
I want to get the sector by passing a value that falls within the range.
So far I have this:
select sector from table1 where [range] = 564
Expected outcome:
3
Is there a function I can use to get the data?

Please try the following solution.
SQL
-- DDL and sample data population, start
DECLARE @tbl TABLE (ID INT IDENTITY PRIMARY KEY, [range] varchar(9), [sector] int);
INSERT INTO @tbl ([range], [sector]) VALUES
('684-733', 2),
('563-598', 3),
('514-544', 2),
('640-682', 3),
('1053-1152', 2);
-- DDL and sample data population, end
DECLARE @param INT = 564;
;WITH rs AS
(
SELECT *
, LEFT([range], pos -1) AS [start]
, RIGHT([range], LEN([range]) - pos) AS [end]
FROM @tbl
CROSS APPLY (SELECT CHARINDEX('-', [range])) AS t(pos)
)
SELECT sector
FROM rs
WHERE @param BETWEEN [start] AND [end];
Output
+--------+
| sector |
+--------+
|      3 |
+--------+
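If the [range] data could ever be malformed (a missing delimiter or non-numeric bounds), a slightly hardened sketch of the same CTE, assuming SQL Server 2012 or later for TRY_CAST, simply skips such rows instead of raising a conversion error:
;WITH rs AS
(
SELECT *
, TRY_CAST(LEFT([range], pos - 1) AS INT) AS [start]
, TRY_CAST(RIGHT([range], LEN([range]) - pos) AS INT) AS [end]
FROM @tbl
CROSS APPLY (SELECT NULLIF(CHARINDEX('-', [range]), 0)) AS t(pos) -- NULL pos when no '-' is found
)
SELECT sector
FROM rs
WHERE @param BETWEEN [start] AND [end];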

Storing a number as a string like this could cause many issues later. Your best move is refactoring the table as follows:
CREATE TABLE Table1
([beginrange] int, [endrange] int, [sector] int)
Insert into table1 values
(684, 733, 2)
...
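If you need to carry the existing rows over, a one-off migration along these lines could populate the refactored table (a sketch only: Table1_New is a placeholder name for the new table, and the parse assumes every [range] value contains exactly one '-'):
INSERT INTO Table1_New ([beginrange], [endrange], [sector])
SELECT CONVERT(int, LEFT([range], CHARINDEX('-', [range]) - 1)) -- text before the '-'
, CONVERT(int, SUBSTRING([range], CHARINDEX('-', [range]) + 1, LEN([range]))) -- text after the '-'
, [sector]
FROM Table1;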
You would then get your result with:
select sector from table1 where 564 between [beginrange] and [endrange]
That said, if you do not have control over this table, you'll need to parse the string into two integers:
select *
from table1
where 580 between convert(int, substring([range], 0, charindex('-', [range])))
and convert(int, substring([range], charindex('-', [range]) + 1, len([range])))
You can look up the various functions used here.

In SQL Server 2016 and later you can use STRING_SPLIT:
select t1.sector
from table1 t1
cross apply string_split(t1.[range], '-') y
group by t1.[range], t1.sector -- group per range so bounds from different ranges with the same sector aren't mixed
having 564 between min(cast(y.value as int)) and max(cast(y.value as int))

Use a CROSS APPLY and a CASE statement to find the range where 564 is between the start and end values:
select
tbl1.range,
case when 564 between Lookup.startValue and Lookup.endValue then
tbl1.sector
end Sector
from @tbl tbl1
cross apply
(
select
tbl2.range,
tbl2.sector,
cast(left(tbl2.range,charindex('-',tbl2.range)-1) as int) startValue,
cast(right(tbl2.range,len(tbl2.range)-charindex('-',tbl2.range)) as int) endValue
from @tbl tbl2
where tbl1.range=tbl2.range
)Lookup
where
case when 564 between Lookup.startValue and Lookup.endValue then
tbl1.sector
end is not null
output:
range     Sector
563-598   3


How to split a string from a table in SQL Server 2012?

I'd like to split comma-delimited strings in SQL Server 2012. I'm interested in an XML solution, not a function or while loop (for performance and permissions reasons). I read this post: STRING_SPLIT in SQL Server 2012, which was helpful; however, my context is not splitting a variable but rather a column in a table. Below is an example of the kind of dataset I'm working with:
CREATE TABLE #EXAMPLE
(
ID INT,
LIST VARCHAR(1000)
)
INSERT INTO #EXAMPLE
VALUES (1, '12345,54321'), (2, '48965'), (3, '98765,45678,15935'), (4, '75315')
SELECT * FROM #EXAMPLE
DROP TABLE #EXAMPLE
Given that dataset, how could I go about splitting the LIST field on the comma so that I get this data set?
CREATE TABLE #EXAMPLE
(
ID INT,
LIST VARCHAR(1000)
)
INSERT INTO #EXAMPLE
VALUES (1, '12345'), (1, '54321'), (2, '48965'), (3, '98765'), (3, '45678'), (3, '15935'), (4, '75315')
SELECT * FROM #EXAMPLE
DROP TABLE #EXAMPLE
I feel like I'm blanking on implementing this with a table column as opposed to a variable, but I'm sure it's pretty similar. I'd be greatly appreciative of any input. Thanks!
If you want an XML solution, the following should hopefully suffice.
Note: this is easily wrapped in a reusable table-valued function, but since you state you don't want a function it is used inline here (a sketch of such a wrapper appears after the query below).
select e.id, s.List
from #example e
cross apply (
select List = y.i.value('(./text())[1]', 'varchar(max)')
from (
select x = convert(xml, '<i>' + replace(e.list, ',', '</i><i>') + '</i>').query('.')
) as a cross apply x.nodes('i') as y(i)
)s
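For reference, a sketch of what that reusable wrapper could look like (the function name and signature here are assumptions, not part of the original answer):
create function dbo.fn_SplitCsvXml (@list varchar(max))
returns table
as
return
-- same XML trick as above, parameterised on the input string
select Item = y.i.value('(./text())[1]', 'varchar(max)')
from (
select x = convert(xml, '<i>' + replace(@list, ',', '</i><i>') + '</i>').query('.')
) as a cross apply x.nodes('i') as y(i);
Usage would then be: select e.id, s.Item from #example e cross apply dbo.fn_SplitCsvXml(e.LIST) s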
Taking the query from the post you linked into account, this can be done by slightly changing it to add CROSS APPLY.
Select e.ID, t.a
From #Example As e Cross Apply (
SELECT Split.a.value('.', 'NVARCHAR(MAX)') DATA
FROM
(
SELECT CAST('<X>'+REPLACE(e.List, ',', '</X><X>')+'</X>' AS XML) AS String
) AS A
CROSS APPLY String.nodes('/X') AS Split(a)) As t(a)
As @Charlieface already mentioned, there is a risk of bumping into XML entities: ampersands and the like.
That's why I always use a CDATA section for safety.
SQL
-- DDL and sample data population, start
DECLARE @tbl TABLE (ID INT, LIST VARCHAR(1000));
INSERT INTO @tbl VALUES
(1, '12345,<54321'),
(2, '48965'),
(3, '98765,45678,15935'),
(4, '75315');
-- DDL and sample data population, end
SELECT e.id, s.List
FROM @tbl e
CROSS APPLY (
SELECT List = y.i.value('(./text())[1]', 'VARCHAR(MAX)')
FROM (
SELECT x = TRY_CAST('<i><![CDATA[' + REPLACE(e.list, ',', ']]></i><i><![CDATA[') + ']]></i>' AS XML)
) AS a CROSS APPLY x.nodes('i') as y(i)
) AS s;
Output
+----+--------+
| id | List   |
+----+--------+
| 1  | 12345  |
| 1  | <54321 |
| 2  | 48965  |
| 3  | 98765  |
| 3  | 45678  |
| 3  | 15935  |
| 4  | 75315  |
+----+--------+

Compare the two tables and update the value in a Flag column

I have two tables with values like this:
create table InputLocationTable(SKUID int,InputLocations varchar(100),Flag varchar(100))
create table Location(SKUID int,Locations varchar(100))
insert into InputLocationTable(SKUID,InputLocations) values(11,'Loc1, Loc2, Loc3, Loc4, Loc5, Loc6')
insert into InputLocationTable(SKUID,InputLocations) values(12,'Loc1, Loc2')
insert into InputLocationTable(SKUID,InputLocations) values(13,'Loc4,Loc5')
insert into Location(SKUID,Locations) values(11,'Loc3')
insert into Location(SKUID,Locations) values(11,'Loc4')
insert into Location(SKUID,Locations) values(11,'Loc5')
insert into Location(SKUID,Locations) values(11,'Loc7')
insert into Location(SKUID,Locations) values(12,'Loc10')
insert into Location(SKUID,Locations) values(12,'Loc1')
insert into Location(SKUID,Locations) values(12,'Loc5')
insert into Location(SKUID,Locations) values(13,'Loc4')
insert into Location(SKUID,Locations) values(13,'Loc2')
insert into Location(SKUID,Locations) values(13,'Loc2')
I need to get the output by matching SKUIDs from each table and updating the value in the Flag column as shown in the screenshot. I have tried something like this:
SELECT STUFF((select ','+ Data.C1
FROM
(select
n.r.value('.', 'varchar(50)') AS C1
from InputLocation as T
cross apply (select cast('<r>'+replace(replace(Location,'&','&amp;'), ',', '</r><r>')+'</r>' as xml)) as S(XMLCol)
cross apply S.XMLCol.nodes('r') as n(r)) DATA
WHERE data.C1 NOT IN (SELECT Location
FROM Location) for xml path('')),1,1,'') As Output
But I'm not convinced by the output, and I am also trying to avoid the FOR XML PATH code (performance is not the first priority here). I need the output like the screenshot below. Any help would be greatly appreciated.
I think you need to first look at why you think the XML approach is not performing well enough for your needs, as it has actually been shown to perform very well for larger input strings.
If you only need to handle input strings of up to either 4000 or 8000 characters (non max nvarchar and varchar types respectively), you can utilise a tally table contained within an inline table valued function which will also perform very well. The version I use can be found at the end of this post.
Utilising this function we can split out the values in your InputLocations column, though we still need to use for xml to concatenate them back together for your desired format:
-- Define data
declare @InputLocationTable table (SKUID int,InputLocations varchar(100),Flag varchar(100));
declare @Location table (SKUID int,Locations varchar(100));
insert into @InputLocationTable(SKUID,InputLocations) values (11,'Loc1, Loc2, Loc3, Loc4, Loc5, Loc6'),(12,'Loc1, Loc2'),(13,'Loc4,Loc5'),(14,'Loc1');
insert into @Location(SKUID,Locations) values (11,'Loc3'),(11,'Loc4'),(11,'Loc5'),(11,'Loc7'),(12,'Loc10'),(12,'Loc1'),(12,'Loc5'),(13,'Loc4'),(13,'Loc2'),(13,'Loc2'),(14,'Loc1');
--Query
-- Derived table splits out the values held within the InputLocations column
with i as
(
select i.SKUID
,i.InputLocations
,s.item as Loc
from @InputLocationTable as i
cross apply dbo.fn_StringSplit4k(replace(i.InputLocations,' ',''),',',null) as s
)
select il.SKUID
,il.InputLocations
,isnull('Add ' -- The split Locations are then matched to those already in #Location and those not present are concatenated together.
+ stuff((select ', ' + i.Loc
from i
left join @Location as l
on i.SKUID = l.SKUID
and i.Loc = l.Locations
where il.SKUID = i.SKUID
and l.SKUID is null
for xml path('')
)
,1,2,''
)
,'No Flag') as Flag
from @InputLocationTable as il
order by il.SKUID;
Output:
+-------+------------------------------------+----------------------+
| SKUID | InputLocations | Flag |
+-------+------------------------------------+----------------------+
| 11 | Loc1, Loc2, Loc3, Loc4, Loc5, Loc6 | Add Loc1, Loc2, Loc6 |
| 12 | Loc1, Loc2 | Add Loc2 |
| 13 | Loc4,Loc5 | Add Loc5 |
| 14 | Loc1 | No Flag |
+-------+------------------------------------+----------------------+
For nvarchar input (I have different functions for varchar and max-type input), this is my version of the string-splitting function referenced above:
create function [dbo].[fn_StringSplit4k]
(
@str nvarchar(4000) = ' ' -- String to split.
,@delimiter as nvarchar(1) = ',' -- Delimiting value to split on.
,@num as int = null -- Which value in the list to return. NULL returns all.
)
returns table
as
return
-- Start tally table with 10 rows.
with n(n) as (select 1 union all select 1 union all select 1 union all select 1 union all select 1 union all select 1 union all select 1 union all select 1 union all select 1 union all select 1)
-- Select the same number of rows as characters in #str as incremental row numbers.
-- Cross joins increase exponentially to a max possible 10,000 rows to cover largest #str length.
,t(t) as (select top (select len(isnull(@str,'')) a) row_number() over (order by (select null)) from n n1,n n2,n n3,n n4)
-- Return the position of every value that follows the specified delimiter.
,s(s) as (select 1 union all select t+1 from t where substring(isnull(@str,''),t,1) = @delimiter)
-- Return the start and length of every value, to use in the SUBSTRING function.
-- ISNULL/NULLIF combo handles the last value where there is no delimiter at the end of the string.
,l(s,l) as (select s,isnull(nullif(charindex(@delimiter,isnull(@str,''),s),0)-s,4000) from s)
select rn
,item
from(select row_number() over(order by s) as rn
,substring(@str,s,l) as item
from l
) a
where rn = @num
or @num is null;
go
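A quick standalone usage sketch of the function above:
-- One row per comma-separated item, with its ordinal position.
select rn, item
from dbo.fn_StringSplit4k(N'Loc1, Loc2, Loc3', N',', null);
-- Passing a number as the third argument returns only that item; 2 returns ' Loc2' here
-- (the answer above strips spaces with REPLACE before calling the function).
select item
from dbo.fn_StringSplit4k(N'Loc1, Loc2, Loc3', N',', 2);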

Speeding up my query using SQL Server 2008. Alternative to CROSS APPLY

I have a function that is performing very slowly. I am working with a database that I need to migrate data from, and I have NO control over it!
Ideally I would like to use a view directly, since this function is called by a view, but I could only seem to be able to do it by calling a function.
===
A view should return whatever is in the DummyTable by OrderNo. If an OrderNo has a PaymentType of "Interest" then the balance should be interest; if "Tax", it should be tax.
In real life I will have 200,000 rows or more, and using CROSS APPLY seems to slow things down quite a lot.
Is there a better way to get the data than using CROSS APPLY?
Noddy sample here (data and data types are fictitious for simplicity of the example):
CREATE DATABASE DummyDB
GO
use DummyDB
IF object_id(N'DummyTable', 'U') IS NOT NULL
DROP TABLE DummyTable
GO
CREATE TABLE DummyTable
(
Id int,
OrderNo varchar(255),
PaymentType varchar(255),
Credit varchar(255),
Debit varchar(255),
Balance varchar(255)
)
GO
INSERT INTO [dbo].[DummyTable]([Id], [OrderNo], [PaymentType], [Credit], [Debit], [Balance])
SELECT 1, N'200', N'Interest', N'10', N'5', N'5' UNION ALL
SELECT 2, N'201', N'Deposit', N'400', N'30', N'370' UNION ALL
SELECT 3, N'202', N'Tax', N'20', N'10', N'10' UNION ALL
SELECT 4, N'202', N'Tax', N'50', N'10', N'10'
-- my sample attempt, which is not performing well
use DummyDB
select * from DummyTable
Declare @OrderNo int
set @OrderNo=202
SELECT
Tax=tx.Tax,
Interest=tx1.Interest,
Deposit=tx2.Deposit
FROM DummyTable T1
CROSS APPLY(SELECT
Tax=sum(cast(T2.Balance as money))
FROM DummyTable T2
WHERE T2.OrderNo=@OrderNo
AND PaymentType='Tax')as tx
CROSS APPLY(select
Interest=sum(cast(T2.Balance as money))
FROM DummyTable T2
WHERE T2.OrderNo=@OrderNo
AND PaymentType='Interest')as tx1
CROSS APPLY(select
Deposit=sum(cast(T2.Balance as money))
FROM DummyTable T2
WHERE T2.OrderNo=@OrderNo
AND PaymentType='Deposit')as tx2
WHERE T1.OrderNo=@OrderNo
Any suggestions for something more efficient than CROSS APPLY?
Many thanks
This will do almost the same as your sample query. It will give you one row with the result, whereas your query repeats the values for all rows that match @OrderNo.
select sum(case when T1.PaymentType = 'Tax' then cast(T1.Balance as money) else 0 end) as Tax,
sum(case when T1.PaymentType = 'Interest' then cast(T1.Balance as money) else 0 end) as Interest,
sum(case when T1.PaymentType = 'Deposit' then cast(T1.Balance as money) else 0 end) as Deposit
from DummyTable as T1
where T1.OrderNo = @OrderNo
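If you later need the same figures for every order at once (for example, inside the view you mentioned), the same conditional-aggregation pattern extends with a GROUP BY; a sketch:
select T1.OrderNo,
sum(case when T1.PaymentType = 'Tax' then cast(T1.Balance as money) else 0 end) as Tax,
sum(case when T1.PaymentType = 'Interest' then cast(T1.Balance as money) else 0 end) as Interest,
sum(case when T1.PaymentType = 'Deposit' then cast(T1.Balance as money) else 0 end) as Deposit
from DummyTable as T1
group by T1.OrderNo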
BTW, you should make sure that the data type of OrderNo in the table is the same as the variable @OrderNo. It looks like you are dealing with integers, so you should change the table. If that is not possible for you, then you need to change @OrderNo to varchar(255) if you want to use an index on OrderNo.
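A sketch of both options, for illustration (the index name is an assumption):
-- Option 1: fix the column type so integer comparisons are natural.
alter table DummyTable alter column OrderNo int;
-- Option 2: if the column must stay varchar, compare against a matching type so an
-- index on OrderNo can be used without an implicit conversion on the column side.
declare @OrderNo varchar(255) = '202';
select T1.PaymentType, T1.Balance
from DummyTable as T1
where T1.OrderNo = @OrderNo;
-- Hypothetical supporting index.
create index IX_DummyTable_OrderNo on DummyTable (OrderNo) include (PaymentType, Balance);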

How can I maintain a running total in a SQL Server database using VB.NET?

I am using Visual Studio 2010 to build a Windows Forms application to maintain a table in an SQL Server 2008 database. The table is named CASHBOOK and here are the further details:
DATE     | DESCRIPTION  | DEBIT | CREDIT | BALANCE
---------|--------------|-------|--------|--------
1/1/2011 | CASH BALANCE |       |        | 5000
1/1/2011 | SALES        | 2500  |        | 7500
2/1/2011 | PURCHASE     |       | 3000   | 4500
2/1/2011 | RENT         |       | 4000   | 500
2/1/2011 | SALES        | 5000  |        | 5500
I can use CASHBOOKTABLEADAPTER.INSERT(...) to insert appropriately, but my problem is how do I update the BALANCE column?
See this article by Alexander Kuznetsov
Denormalizing to enforce business rules: Running Totals
You can try an insert with a subquery, something like the following:
INSERT INTO CASHBOOK (DESCRIPTION, DEBIT, BALANCE)
SELECT 'asdf', 2500,
(SELECT TOP (1) BALANCE FROM CASHBOOK ORDER BY [DATE] DESC) + 2500 -- latest balance; ordering by DATE is an assumption, since the question's table has no ID column
It's a bit heavy handed, but here's a way to update the full table with balance information.
update
a
set
a.Balance = (
select sum(isnull(x.debit, 0.0) - isnull(x.credit, 0.0))
from cashbook x
where x.Date < a.Date
or (x.Date = a.Date and x.ID <= a.ID)
) + (
select top 1 y.Balance
from cashbook y
where y.debit is null
and y.credit is null
order by y.ID
)
from
cashbook a
Now that's useful only if you HAVE to have the balance in the table. A more appropriate solution might be to create a UDF that encompasses this logic and call that to calculate the balance field for a specific row only when you need it. It really all depends on your usage.
create function dbo.GetBalance(@id int) returns decimal(12, 2) as
begin
declare @result decimal(12, 2) = 0.0
select
@result = (
select sum(isnull(x.debit, 0.0) - isnull(x.credit, 0.0))
from cashbook x
where x.Date < a.Date
or (x.Date = a.Date and x.ID <= a.ID)
) + (
select top 1 y.Balance
from cashbook y
where y.debit is null
and y.credit is null
order by y.ID
)
from
cashbook a
where
a.ID = @id
return @result
end
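Usage would then be per row, only when you need it, e.g. (assuming the ID column the function relies on exists on CASHBOOK):
select [DATE], [DESCRIPTION], [DEBIT], [CREDIT],
dbo.GetBalance(ID) as BALANCE
from CASHBOOK
order by ID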
Why do you need to? This is something that should be calculated as a reporting/viewing function. I would suggest creating a view with a running-total column (there are various ways to achieve this).
Alternatively, if you're viewing this in VB.NET, calculate it in your app.
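A minimal sketch of such a view, assuming an ID column orders the rows and that every movement, including the opening balance, is recorded as a DEBIT or CREDIT (SQL Server 2008 has no SUM() OVER (ORDER BY ...), so a correlated subquery is used):
create view dbo.CashbookWithBalance
as
select c.ID, c.[DATE], c.[DESCRIPTION], c.[DEBIT], c.[CREDIT],
(select sum(isnull(x.[DEBIT], 0) - isnull(x.[CREDIT], 0))
from dbo.CASHBOOK as x
where x.ID <= c.ID) as BALANCE -- running total of all rows up to and including this one
from dbo.CASHBOOK as c;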
I agree with Joel, you should be calculating this at runtime, not storing the running totals in the database. Here's an example of how to figure out the running totals using a recursive CTE in SQL Server:
declare @values table (ID int identity(1,1), Value decimal(4,2))
declare @i int
insert into @values values (1.00)
insert into @values values (2.00)
insert into @values values (3.00)
insert into @values values (4.00)
insert into @values values (5.00)
insert into @values values (6.00)
select @i=min(ID) from @values
;with a as
(
select ID, Value, Value as RunningTotal
from #values
where ID=@i
union all
select b.ID, b.Value, cast(b.Value + a.RunningTotal as decimal(4,2)) as RunningTotal
from @values b
inner join a
on b.ID=a.ID+1
)
select * from a
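Note that recursive CTEs are capped at 100 recursion levels by default; for longer chains, a MAXRECURSION hint on the outer statement raises or removes the cap, e.g.:
select * from a
option (maxrecursion 0) -- 0 removes the cap; any value from 1 to 32767 sets an explicit limit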
Here's a blog on recursive queries: Recursive CTEs
Also, here's a lengthy discussion about running totals.
One potential problem with recursive CTEs is the maximum depth limit of 32767, which can be prohibitive in a production environment.
In this solution you add an id column that is ordinal to the transaction sequence and then update the balance column in place.
declare @t table(id int identity(1,1) not null
, [DATE] date not null
, [DESCRIPTION] varchar(80) null
, [DEBIT] money not null default(0)
, [CREDIT] money not null default(0)
, [BALANCE] money not null default(0)
);
declare @bal money=0;
insert into @t([DATE],[DESCRIPTION],[DEBIT],[CREDIT],[BALANCE])
select '1/1/2011','CASH BALANCE',0,0,5000 UNION ALL
select '1/1/2011','SALES',2500,0,0 UNION ALL
select '2/1/2011','PURCHASE',0,3000,0 UNION ALL
select '2/1/2011','RENT',0,4000,0 UNION ALL
select '2/1/2011','SALES',5000,0,0;
set @bal=(select top 1 [BALANCE] from @t order by id); /* opening balance is stored but not computed, so we simply look it up here. */
update t
set @bal=t.[BALANCE]=(t.[DEBIT]-t.[CREDIT])+@bal
output
inserted.*
from @t t
left join @t t0 on t0.id+1=t.id; /* should order by id by default, but to be safe we force the issue here. */

Sorting SQL table

Can anyone help me with T-SQL to sort this table
ID Comment ParentId
-- ------- --------
3  t1      NULL
4  t2      NULL
5  t1_1    3
6  t2_1    4
7  t1_1_1  5
to look like this
ID Comment ParentId
-- ------- --------
3  t1      NULL
5  t1_1    3
7  t1_1_1  5
4  t2      NULL
6  t2_1    4
Kind regards,
Lennart
Try this:
DECLARE @YourTable table (id int, Comment varchar(10), parentID int)
INSERT INTO @YourTable VALUES (3, 't1' , NULL)
INSERT INTO @YourTable VALUES (4, 't2' , NULL)
INSERT INTO @YourTable VALUES (5, 't1_1' , 3)
INSERT INTO @YourTable VALUES (6, 't2_1' , 4)
INSERT INTO @YourTable VALUES (7, 't1_1_1', 5)
;with c as
(
SELECT id, comment, parentid, CONVERT(varchar(8000),RIGHT('0000000000'+CONVERT(varchar(10),id),10)) as SortBy
from @YourTable
where parentID IS NULL
UNION ALL
SELECT y.id, y.comment, y.parentid, LEFT(c.SortBy+CONVERT(varchar(8000),RIGHT('0000000000'+CONVERT(varchar(10),y.id),10)),8000) AS SortBy
FROM c
INNER JOIN @YourTable y ON c.ID=y.ParentID
)
select * from C ORDER BY SortBy
EDIT
Here is the output:
id          comment    parentid    SortBy
----------- ---------- ----------- ---------------------------------
3           t1         NULL        0000000003
5           t1_1       3           00000000030000000005
7           t1_1_1     5           000000000300000000050000000007
4           t2         NULL        0000000004
6           t2_1       4           00000000040000000006
(5 row(s) affected)
Hmm, ORDER BY?
http://t-sql.pro/t-sql/ORDER-BY.aspx
SELECT ID, Comment, ParentId
FROM TestTable
ORDER BY Comment, ParentId asc
This sounds very much like a homework question, but here are some hints on where to go with this:
You'll want to do a quick google or StackOverflow search for the ORDER BY clause to be able to get a set of results ordered by the column you want to use (i.e. the 'Comment' column).
Once you've got that, you can start writing a SQL statement to order your results.
If you then need to re-order the actual table (and not just get the results in a specific order), you'll need to look up temporary tables (try searching for 'DECLARE TABLE'). Much like any temp swap, you can place the results you have in a temporary place, delete the old data, and then replace the table contents with the temporary data, this time in the order you want.
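A literal sketch of that swap (YourTable stands in for the real table name; bear in mind that rows in a table have no guaranteed order anyway, only an ORDER BY on the query that reads them does):
declare @Sorted table (ID int, Comment varchar(10), ParentId int);
insert into @Sorted (ID, Comment, ParentId)
select ID, Comment, ParentId
from YourTable
order by Comment; -- capture the rows in the desired order
delete from YourTable; -- remove the old data
insert into YourTable (ID, Comment, ParentId)
select ID, Comment, ParentId
from @Sorted; -- re-insert the captured rows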
But just ordering by Comment will give you that? Or have I missed the point?!
declare @table table
(
Comment varchar(10)
)
insert into @table (Comment) values ('t1')
insert into @table (Comment) values ('t2')
insert into @table (Comment) values ('t1_1')
insert into @table (Comment) values ('t2_1')
insert into @table (Comment) values ('t1_1_1')
select * from @table order by comment
