While loop taking too much time - can it be optimized? - sql-server

I have 80,000 rows in my table. Based on that, I have to forecast their contribution until their date of retirement. When I execute the query it takes around 20 minutes to complete. How can I optimize the query?
declare @Max as int
declare @Kount as int
Set @Kount = 1

SELECT row_number() OVER (ORDER BY PRAN) as row,
       PRAN, emp_cont, dob, doj_govt, dor, [100 sal], [BP+GP], nps14
INTO #rawdata1
FROM npst
ORDER BY PRAN

set @Max = (SELECT MAX(row) FROM #rawdata1)

CREATE TABLE #Rawdata2
(
    [Row] int,
    PRAN float,
    emp_cont float,
    dob datetime,
    doj_govt datetime,
    dor datetime,
    [100 sal] float,
    [BP+GP] float,
    nps14 float,
    CalAmt float
)

while (@Kount <= @Max)
BEGIN
    DECLARE @PRAN float
    SELECT @PRAN = (PRAN) FROM #rawdata1 WHERE row = @Kount

    DECLARE @SER_LEFT INT
    SELECT @SER_LEFT = DATEDIFF(YEAR, GETDATE(), dor) FROM #rawdata1 WHERE row = @Kount

    DECLARE @NPS NUMERIC (10,2)
    SELECT @NPS = nps14 FROM #rawdata1 WHERE row = @Kount

    -- compound the contribution by 3% for each remaining year of service
    DECLARE @I AS INT
    SET @I = 1
    WHILE @I < @SER_LEFT
    BEGIN
        SET @NPS = (0.03 * @NPS) + @NPS
        SET @I = @I + 1
    END

    INSERT INTO #Rawdata2
    SELECT @Kount as Row, PRAN, emp_cont, dob, doj_govt, dor, [100 sal], [BP+GP], nps14, @NPS
    FROM #rawdata1
    WHERE PRAN = @PRAN

    SET @Kount = @Kount + 1
END

You need to create an index on #rawdata1 so SQL Server can use seeks instead of scans every time it looks up or updates a row. I would create a clustered index on the 'row' column first and another index on the 'PRAN' column.
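As a minimal sketch of those indexes (the index names are illustrative; the columns come from the temp tables above):
-- clustered index on the row column used by the loop lookups
CREATE CLUSTERED INDEX IX_rawdata1_row ON #rawdata1 ([row]);
-- nonclustered index on PRAN used by the final INSERT ... WHERE PRAN = @PRAN
CREATE NONCLUSTERED INDEX IX_rawdata1_PRAN ON #rawdata1 (PRAN);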
Why is PRAN in parentheses in this statement?
SELECT @PRAN = (PRAN) FROM #rawdata1 where row = @Kount
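That said, the whole loop can be replaced by one set-based INSERT, because the inner loop only compounds nps14 by 3% per remaining year of service. A sketch, assuming the same source table and columns as in the question (rounding may differ slightly from the iterative NUMERIC(10,2) accumulation):
-- POWER(1.03, n) replaces the inner WHILE loop; the outer loop over rows disappears entirely
INSERT INTO #Rawdata2 ([Row], PRAN, emp_cont, dob, doj_govt, dor, [100 sal], [BP+GP], nps14, CalAmt)
SELECT ROW_NUMBER() OVER (ORDER BY PRAN) AS [Row],
       PRAN, emp_cont, dob, doj_govt, dor, [100 sal], [BP+GP], nps14,
       nps14 * POWER(CAST(1.03 AS float),
               CASE WHEN DATEDIFF(YEAR, GETDATE(), dor) > 1
                    THEN DATEDIFF(YEAR, GETDATE(), dor) - 1
                    ELSE 0 END) AS CalAmt
FROM npst;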

Related

How to separate variables and values, then insert them into a table?

Problem
A stored procedure receives a list of variables, a list of values, and the delimiter. The stored procedure needs to insert those into a table.
--Example table
create table #tempo
(
Variable1 int,
Variable2 int,
Variable3 int
)
These are the parameters to the stored procedure:
declare @variableList varchar(100)
declare @valueList varchar(100)
declare @separator char(1)
set @variableList = 'Variable1#Variable2#Variable3'
set @valueList = '1111#2222#3333'
set @separator = '#'
Result
What I want to achieve is this:
select * from #tempo
+---------+---------+---------+
|Variable1|Variable2|Variable3|
+---------+---------+---------+
|1111     |2222     |3333     |
+---------+---------+---------+
One way to do it
I could use a loop and build dynamic SQL, but I want to avoid that. Besides the obvious reasons for not using dynamic SQL, the loop structure is hard to maintain and explain, and testing can become an issue too.
Ideal way
I am thinking about a more elegant way to do this, for example with STRING_SPLIT or COALESCE, but I cannot figure out a way without using dynamic SQL or loops.
If you always have the same set of column names, this is very easy to do with pivoting. If the columns can change, you can use the same script with a dynamically adjusted list of variables, provided as a parameter or read directly from the temp table:
INSERT INTO #tempo SELECT *
FROM (
    SELECT [value], rv = 'Variable' + CAST(ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS VARCHAR)
    FROM STRING_SPLIT(@valueList, @separator)
) AS src
PIVOT (MAX([value]) FOR rv IN (Variable1, Variable2, Variable3)) AS pvt;
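One caveat: STRING_SPLIT does not guarantee the order of its output, so the ROW_NUMBER() above may not match each value's position in the input. On SQL Server 2022 and later you can request an explicit ordinal instead; a sketch using the same parameters:
-- the third argument of STRING_SPLIT adds an ordinal column with each value's position
INSERT INTO #tempo
SELECT *
FROM (
    SELECT [value], rv = 'Variable' + CAST(ordinal AS varchar(10))
    FROM STRING_SPLIT(@valueList, @separator, 1)
) AS src
PIVOT (MAX([value]) FOR rv IN (Variable1, Variable2, Variable3)) AS pvt;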
You can always try pivoting the data out. This is just the SELECT, but an INSERT could easily be wrapped around it.
We use a string-split function with a row ID so the two split data sets can be matched up. The function is:
CREATE FUNCTION [dbo].[Split] (@RowData NVARCHAR(MAX), @SplitOn NVARCHAR(5))
RETURNS @RtnValue TABLE (Id INT IDENTITY(1, 1), Data NVARCHAR(100))
AS
BEGIN
    DECLARE @Cnt INT;
    SET @Cnt = 1;
    WHILE (CHARINDEX(@SplitOn, @RowData) > 0)
    BEGIN
        INSERT INTO @RtnValue (Data)
        SELECT Data = LTRIM(RTRIM(SUBSTRING(@RowData, 1, CHARINDEX(@SplitOn, @RowData) - 1)));
        SET @RowData = SUBSTRING(@RowData, CHARINDEX(@SplitOn, @RowData) + 1, LEN(@RowData));
        SET @Cnt = @Cnt + 1;
    END;
    INSERT INTO @RtnValue (Data)
    SELECT Data = LTRIM(RTRIM(@RowData));
    RETURN;
END;
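For reference, calling the function on the sample value list returns ordered Id/Data pairs (expected output shown in the comment):
-- returns (1, '1111'), (2, '2222'), (3, '3333'); the values are trimmed by the function
SELECT Id, Data FROM dbo.Split('1111, 2222, 3333', ',');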
You can then join the two sets together to give key/value pairs, and from there pivot the data into the format you requested. If you replace the last SELECT with a SELECT from any of the earlier CTEs, you can see how the logic unfolds.
DECLARE @variableList VARCHAR(100);
DECLARE @valueList VARCHAR(100);
DECLARE @separator CHAR(1);
SET @variableList = 'Variable1,Variable2,Variable3';
SET @valueList = '1111, 2222, 3333';
SET @separator = ',';

WITH cteVar AS (SELECT Id, Data FROM dbo.Split(@variableList, @separator))
   , cteVal AS (SELECT Id, Data FROM dbo.Split(@valueList, @separator))
   , cteData AS
     (SELECT cteVar.Data VariableData
           , cteVal.Data ValueData
      FROM cteVar
      JOIN cteVal ON cteVal.Id = cteVar.Id)
   , ctePivot AS
     (SELECT *
      FROM cteData
      PIVOT (MAX(ValueData)
             FOR VariableData IN ([Variable1], [Variable2], [Variable3])) AS PivotTable)
SELECT *
FROM ctePivot;
This is quite a long-winded approach, but hopefully it will help you understand the steps involved. It's worth looking at PIVOT in general anyway; it's well documented.

Avoid while loop to check row "state change"

I have a table that stores an Id, a datetime and an increasing int value. The value grows until it "breaks" and resets to a value near 0. Ex: ...1000, 1200, 1350, 8, 10, 25...
I need to count how many times this "overflow" happens, BUT... I'm talking about a table that gains 200k rows per day!
I had already solved it, but with a procedure that iterates over a cursor in a while loop, and I KNOW that isn't the fastest way to do it.
Can someone help me find another way?
Thanks!
Table structure:
Id BIGINT PRIMARY KEY, CreatedAt DATETIME, Value INT NOT NULL.
Problem:
If the delta between two consecutive rows' values is < 0, increase a counter.
The table gets 200k new rows every day.
No trigger allowed.
[FIRST EDIT]
The table actually has this structure:
CREATE TABLE ValuesLog (
Id BIGINT PRIMARY KEY,
Machine BIGINT,
CreatedAt DATETIME,
Value INT
)
I need:
To detect when the [Value] of some [Machine] suddenly decreases.
Some users said to use LEAD/LAG, but there is a problem... if I select several machines, the LEAD/LAG functions don't care which machine a row belongs to. So if I query machine-1 and machine-2, and machine-1 increases while machine-2 decreases, LEAD/LAG will give me a false positive.
So, this is how my table actually looks:
(screenshot: many rows of the actual table, selected for 3 or 4 machines)
In this example the machines happen not to be interleaved, but they can be! And in that case, LEAD/LAG doesn't care whether the line above belongs to machine-1 or machine-2.
What I want:
At line 85 in the screenshot, the [value] breaks and restarts. I'd like to count every occurrence of this for the selected machines.
So:
"Machine-1 restarted 6 times... Machine-9 restarted 10 times..."
I had done something like this:
CREATE PROCEDURE CountProduction @Machine INT_ARRAY READONLY, @Start DATETIME, @End DATETIME AS
SET NOCOUNT ON
-- Declare counter table and insert start values
DECLARE @Counter TABLE(
machine INT PRIMARY KEY,
lastValue BIGINT DEFAULT 0,
count BIGINT DEFAULT 0
)
INSERT INTO @Counter(machine) SELECT n FROM @Machine
-- Declare cursor to iterate over the values log
-- (note: there is no ORDER BY, so the row order the comparison relies on is not guaranteed)
DECLARE valueCursor CURSOR LOCAL FOR
SELECT
ValueLog.Machine,
ValueLog.Value,
Counter.lastValue,
Counter.count
FROM
ValueLog,
@Machine AS Machine,
@Counter AS Counter
WHERE
ValueLog.Machine = Machine.n
AND Counter.machine = ValueLog.Machine
AND ValueLog.DateCreate BETWEEN @Start AND @End;
-- Start iteration
OPEN valueCursor
DECLARE @RowMachine INT
DECLARE @RowValue BIGINT
DECLARE @RowLastValue BIGINT
DECLARE @RowCount BIGINT
FETCH NEXT FROM valueCursor INTO @RowMachine, @RowValue, @RowLastValue, @RowCount
-- Iteration
DECLARE @increment INT
WHILE @@FETCH_STATUS = 0
BEGIN
IF @RowValue < @RowLastValue
SET @increment = 1
ELSE
SET @increment = 0
-- Update counters
UPDATE
@Counter
SET
lastValue = @RowValue,
count = count + @increment
WHERE
machine = @RowMachine
-- Proceed to next row
FETCH NEXT FROM valueCursor INTO @RowMachine, @RowValue, @RowLastValue, @RowCount
END
-- Close iteration
CLOSE valueCursor
DEALLOCATE valueCursor
SELECT machine, count FROM @Counter
Use LEAD(). If the next row < current row, count that occurrence.
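To address the multi-machine concern from the question: partitioning the window function by Machine keeps each machine's rows separate, so mixed selections cannot produce false positives. A minimal sketch, assuming the ValuesLog structure from the edit and date bounds like those in the solution below:
-- LAG partitioned by Machine never compares rows from different machines
SELECT Machine, COUNT(*) AS Restarts
FROM (
    SELECT Machine,
           Value - LAG(Value) OVER (PARTITION BY Machine ORDER BY Id) AS DeltaValue
    FROM ValuesLog
    WHERE CreatedAt BETWEEN @Start AND @End
) d
WHERE DeltaValue < 0
GROUP BY Machine;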
Solved using what @jeroen-mostert suggested:
DECLARE @Start DATETIME
DECLARE @End DATETIME
SET @Start = '2019-01-01'
SET @End = GETDATE()
SELECT
Machine,
COUNT(DeltaValue) AS Prod
FROM
(SELECT
Log.Machine,
Log.Value - LAG(Log.Value) OVER (PARTITION BY Log.Machine ORDER BY Log.Id) AS DeltaValue
FROM
ValueLog AS Log,
(SELECT
Id,
Machine,
Value
FROM
ValueLog
) AS AuxLog
WHERE
AuxLog.Id = Log.Id
AND Log.DateCreate BETWEEN @Start AND @End
AND Log.Machine IN (1, 9, 10)
) AS TB1
WHERE
DeltaValue < 0
GROUP BY
Machine
ORDER BY
Machine
In this case, the inner LAG/LEAD function didn't mix up the machines (which did happen for some reason when I created a view... I'll try to understand that later).
Thanks everybody! I'm new to databases, and this question drove me crazy for a whole day.

Simple logic but not working as it should

This is what I want to achieve:
The trigger fires on the opportunities table when a record with opp_type = 0 is inserted.
The next part of the code does the calculation: pick up the last used number from a custom table, add 1 to it, and store the new value in a variable.
The next part inserts into the user field.
Finally, the custom table is updated to record the last number used.
I am getting the number to increment by one in the NextEXP1 table; however, the user field called O_Quote is not populating via the GUI.
Is the code below doing what it should in terms of the explanation above?
Stepping through my trigger it seems so, but the user field is not populating with the last number used:
alter TRIGGER [dbo].[Q2] ON [dbo].[AMGR_opportunity_Tbl] AFTER INSERT
AS
BEGIN
Declare @Opp_Type int
Select @Opp_Type = 0 from inserted
If @Opp_Type = 0
BEGIN
SET NOCOUNT ON;
DECLARE @Client_Id varchar(24)
DECLARE @Contact_Number int
DECLARE @NewNumber varchar(250)
DECLARE @NextQNo float
DECLARE @UDFName varchar(50)
DECLARE @GeneratorPrefix varchar(10)
DECLARE @GeneratorLength float
DECLARE @Opptype int
DECLARE @Type_id int
DECLARE @Oppid varchar (24)
--select top 1 nextqno = nextqno from nextexp1
SELECT @NewNumber = NextQno + 1 from dbo.NextEXP1
----insert into user field
insert into O_Quote(Client_Id, Contact_Number, Type_Id, Code_Id, [O_Quote])
values (@Client_Id,0,15,0,@NextQNo)
-------update table with last number used
UPDATE [dbo].[NextEXP1] SET NextQNo = @NewNumber
End
End
GO
@Leonidas199x is right on all points. There are also too many things that are unclear in the question, and a lot of data is missing; however, this is what I can suggest (this code also handles bulk inserts):
alter TRIGGER [dbo].[Q2] ON [dbo].[AMGR_opportunity_Tbl] AFTER INSERT
AS
BEGIN
DECLARE @NewNumber varchar(250);
SELECT @NewNumber = MAX(NextQno) FROM dbo.NextEXP1; -- I guess that's what you want
insert into O_Quote(Client_Id, Contact_Number, Type_Id, Code_Id, [O_Quote])
select Client_Id, Contact_Number, Type_Id, Code_Id, @NewNumber + row_num
FROM (
    SELECT Client_Id, -- once again, do not know where this value is taken from
           0 Contact_Number,
           15 Type_Id,
           0 AS Code_Id,
           ROW_NUMBER() OVER(ORDER BY client_id) row_num
    FROM INSERTED WHERE Opp_Type = 0 --I guess that's the right column name
) a;
SELECT @NewNumber = MAX(O_Quote) FROM O_Quote;
UPDATE [dbo].[NextEXP1] SET NextQNo = @NewNumber;
END
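A further caveat: reading NextEXP1 and updating it in separate statements can hand the same number to two overlapping inserts. A sketch of an atomic alternative, assuming NextEXP1 holds a single NextQNo row:
-- increment the counter and capture the new value in one statement,
-- so concurrent triggers cannot read the same NextQNo
DECLARE @NewNumber int;
UPDATE dbo.NextEXP1
SET @NewNumber = NextQNo = NextQNo + 1;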
Looking into this, I think your logic is a bit off:
Select @Opp_Type = 0 from inserted
This will always evaluate as 0.
You want to use:
SELECT @Opp_Type = i.Opp_Type
FROM inserted AS i;
Where i.Opp_Type is your column name.
Secondly, you declare a bunch of variables, but never set them:
DECLARE @Client_Id varchar(24)
DECLARE @Contact_Number int
DECLARE @NewNumber varchar(250)
DECLARE @NextQNo float
DECLARE @UDFName varchar(50)
DECLARE @GeneratorPrefix varchar(10)
DECLARE @GeneratorLength float
DECLARE @Opptype int
DECLARE @Type_id int
DECLARE @Oppid varchar (24)
And then go on to insert them. You need to set these, if you want to use them later. Should this be:
insert into O_Quote(Client_Id, Contact_Number, Type_Id, Code_Id, [O_Quote])
values (@Client_Id,0,15,0,@NewNumber)
Or do you need to set @NextQNo to be:
SELECT @NextQNo = NextQno from dbo.NextEXP1;
SELECT @NewNumber = @NextQNo + 1;
And lastly, the way this is written will cause you issues if you insert more than one record at a time. You would need to think about a loop to get that max ID, which isn't ideal. Can you look at using IDENTITY columns instead?
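Along the same lines as the IDENTITY suggestion, a SEQUENCE object (SQL Server 2012 and later) could replace the NextEXP1 counter table altogether. This is only a sketch, not the original poster's setup:
-- a sequence hands out unique, concurrency-safe numbers without a counter table (gaps are possible)
CREATE SEQUENCE dbo.QuoteNumber AS int START WITH 1 INCREMENT BY 1;

-- inside the trigger, one number per inserted row (assumes Client_Id and Opp_Type exist on inserted)
INSERT INTO O_Quote (Client_Id, Contact_Number, Type_Id, Code_Id, [O_Quote])
SELECT i.Client_Id, 0, 15, 0, NEXT VALUE FOR dbo.QuoteNumber
FROM inserted AS i
WHERE i.Opp_Type = 0;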

Procedure to award every 3rd person

I'm working on a stored procedure which should reward every 3rd person with an extra bonus on their current credit. The amount of the bonus and the "3rd person" position should be parameterized. Below is my current code, but when I try to execute it with SQLFiddle I always get the error Incorrect syntax near 'INTEGER', and I can't find the mistake in my code. I'm using MS SQL Server 2014.
CREATE TABLE Customer (
custnr INTEGER PRIMARY KEY IDENTITY,
name VARCHAR(40) NOT NULL,
firstname VARCHAR(40) NOT NULL,
credit DECIMAL(12,2)
);
CREATE PROCEDURE awardBonus
@position INTEGER;
@bonus DECIMAL(5,2)
AS
BEGIN
DECLARE @creditCustomer DECIMAL(12,2);
DECLARE customer_cursor CURSOR FOR
SELECT custnr
FROM Customer
ORDER BY custnr ASC;
OPEN customer_cursor;
FETCH NEXT FROM customer_cursor INTO @custnr;
WHILE @@FETCH_STATUS = 0
BEGIN
IF (@custnr % @position = 0)
BEGIN
SELECT @creditCustomer = credit
FROM Customer
WHERE custnr = @custnr;
SET @creditCustomer = @creditCustomer + @bonus;
UPDATE Customer
SET credit = @creditCustomer
WHERE custnr = @custnr;
END;
FETCH NEXT FROM customer_cursor INTO @custnr;
END;
CLOSE customer_cursor;
DEALLOCATE customer_cursor;
END;
EXECUTE awardBonus 3, 100
You need to remove the ; in the parameter list:
@position INTEGER;
Also, you should declare @custnr first:
DECLARE @custnr INT;
You also have an invalid column name error in your ORDER BY clause:
ORDER BY knr ASC;
should be:
ORDER BY custnr ASC;
Not so fast!
You can rewrite this in a set-based fashion and remove the CURSOR entirely:
CREATE PROCEDURE awardBonus
@position INTEGER,
@bonus DECIMAL(5,2)
AS
BEGIN
WITH Cte AS(
    SELECT *,
        rn = ROW_NUMBER() OVER(ORDER BY custnr)
    FROM Customer
)
UPDATE Cte
SET credit = credit + @bonus
WHERE
    rn % @position = 0
END
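Called with the values from the question, every third customer in custnr order then gets the bonus:
-- award 100.00 to every 3rd customer, ordered by custnr
EXECUTE awardBonus @position = 3, @bonus = 100.00;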
CREATE PROCEDURE awardBonus
@position INTEGER;
@bonus DECIMAL(5,2)
There is a semicolon after INTEGER; it should be a comma. Corrected version:
CREATE PROCEDURE awardBonus
@position INTEGER,
@bonus DECIMAL(5,2)
On a different note, how are you selecting the 3rd person: should this be a random selection, or ordered? And why are you using a cursor? The set-based solution seems a better choice. In either case you could construct the query using ROW_NUMBER() and select every 3rd record, for example.
You've got a ; where you need a ,:
CREATE PROCEDURE awardBonus
@position INTEGER;
@bonus DECIMAL(5,2)
Furthermore, CREATE PROCEDURE must be the only statement in a batch. So you'll have to create the table in a separate batch.
Also, you use ORDER BY knr ASC, but knr does not exist.
You also use a variable @custnr which is not declared.

Building Stored Procedure to group data into ranges with roughly equal results in each bucket

I am trying to build a procedure that takes a large amount of data and creates 5 range buckets to display it. The bucket ranges have to be set according to the results.
Here is my existing SP
GO
/****** Object: StoredProcedure [dbo].[sp_GetRangeCounts] Script Date: 03/28/2010 19:50:45 ******/
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
ALTER PROCEDURE [dbo].[sp_GetRangeCounts]
@idMenu int
AS
declare
@myMin decimal(19,2),
@myMax decimal(19,2),
@myDif decimal(19,2),
@range1 decimal(19,2),
@range2 decimal(19,2),
@range3 decimal(19,2),
@range4 decimal(19,2),
@range5 decimal(19,2),
@range6 decimal(19,2)
SELECT @myMin=Min(modelpropvalue), @myMax=Max(modelpropvalue)
FROM xmodelpropertyvalues where modelPropUnitDescriptionID=@idMenu
set @myDif=(@myMax-@myMin)/5
set @range1=@myMin
set @range2=@myMin+@myDif
set @range3=@range2+@myDif
set @range4=@range3+@myDif
set @range5=@range4+@myDif
set @range6=@range5+@myDif
select @myMin,@myMax,@myDif,@range1,@range2,@range3,@range4,@range5,@range6
select t.range as myRange, count(*) as myCount
from (
select case
when modelpropvalue between @range1 and @range2 then 'range1'
when modelpropvalue between @range2 and @range3 then 'range2'
when modelpropvalue between @range3 and @range4 then 'range3'
when modelpropvalue between @range4 and @range5 then 'range4'
when modelpropvalue between @range5 and @range6 then 'range5'
end as range
from xmodelpropertyvalues where modelpropunitDescriptionID=@idmenu) t
group by t.range order by t.range
This calculates the min and max value from my table, works out the difference between the two, and creates 5 buckets. The problem is that if there is a small number of very high (or very low) values then the buckets appear very distorted, as in these results...
range1 2806
range2 296
range3 75
range5 1
Basically I want to rebuild the SP so it creates buckets with roughly equal numbers of results in each. I have played around with some of the following approaches without quite nailing it...
SELECT modelpropvalue, NTILE(5) OVER (ORDER BY modelpropvalue) FROM xmodelpropertyvalues - this creates a new column containing 1, 2, 3, 4 or 5
ROW_NUMBER() OVER (ORDER BY modelpropvalue) between @range1 and @range2
ROW_NUMBER() OVER (ORDER BY modelpropvalue) between @range2 and @range3
Or maybe I could allocate every record a row number and then divide it into ranges from that?
You can use the ranking function NTILE to split a result set into equal parts. This example creates a table with the values 0...99 and splits them into 5 ranges:
set nocount on
declare @t table (value int)
declare @i int
set @i = 0
while @i < 100
begin
    insert into @t (value) values (@i)
    set @i = @i + 1
end

select
    NTILE(5) over (order by value) as range
    , value
from @t
By using NTILE in a subquery, you can group and do aggregate math on the ranges. For example, to print the sum of the numbers 0..19, 20..39, 40..59, etc.:
select range, SUM(value)
from (
    select
        NTILE(5) over (order by value) as range
        , value
    from @t
) sub
group by range
You can use TOP 20 PERCENT to get the first fifth of the records; that gives you the end of the first range:
select @range1 = max(modelpropvalue)
from (
    select top 20 percent modelpropvalue
    from xmodelpropertyvalues
    where modelPropUnitDescriptionID = @idMenu
    order by modelpropvalue
) x
Then you can use that value to exclude the first range, and use TOP 25 PERCENT to get the next range:
select @range2 = max(modelpropvalue)
from (
    select top 25 percent modelpropvalue
    from xmodelpropertyvalues
    where modelPropUnitDescriptionID = @idMenu
      and modelpropvalue > @range1
    order by modelpropvalue
) x
And so on, with TOP 33.3333 PERCENT and TOP 50 PERCENT for the third and fourth ranges.
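Alternatively, all four boundaries can be computed in a single pass with NTILE; a sketch using the question's table and the @idMenu parameter:
-- one scan: the MAX of each of the first four tiles is the upper bound of that bucket
SELECT tile, MAX(modelpropvalue) AS upper_bound
FROM (
    SELECT modelpropvalue,
           NTILE(5) OVER (ORDER BY modelpropvalue) AS tile
    FROM xmodelpropertyvalues
    WHERE modelPropUnitDescriptionID = @idMenu
) t
WHERE tile < 5
GROUP BY tile
ORDER BY tile;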
Note that to get the correct counts, you should not use BETWEEN. Because both the start and end values are inclusive, an edge value would be counted both in the range where it ends and in the range where it starts.
select t.range as myRange, count(*) as myCount
from (
    select case
        when modelpropvalue <= @range1 then 'range1'
        when modelpropvalue <= @range2 then 'range2'
        when modelpropvalue <= @range3 then 'range3'
        when modelpropvalue <= @range4 then 'range4'
        else 'range5'
    end as range
    from xmodelpropertyvalues where modelpropunitDescriptionID=@idmenu) t
group by t.range order by t.range
(Well, actually the CASE would protect you here, since it picks the first matching branch; but if you counted each range individually you would count some records twice. To get a single range you would exclude the lower bound and include the upper one: where modelpropvalue > @range1 and modelpropvalue <= @range2.)
OK, I got this working using both approaches, but the problem seems to be my data. In certain areas I have huge numbers of records with the same value in the field I am querying (it is the weight of trucks, and one particular model is extremely popular), so there is really no way to divide this up evenly!
I have decided to go back to the original stored procedure, which calculates the ranges simply by dividing the span between the min and max values by 5, as it ran much quicker. However, as this SP is run up to 8 times per page (for weight, fuel capacity, engine size, etc.), I could do with speeding it up a bit, as it takes about 0.5 seconds to run. Here is the full SP; any ideas on how to optimize its speed would be greatly appreciated...
ALTER PROCEDURE [dbo].[sp_GetRangeCounts]
@idMenu int,
@myFilters varchar(5000),
@myStore int,
@myLabel varchar(50) OUTPUT,
@myUnit varchar(50) OUTPUT,
@range1 int OUTPUT,
@range2 int OUTPUT,
@range3 int OUTPUT,
@range4 int OUTPUT,
@range5 int OUTPUT,
@range6 int OUTPUT,
@range1count int OUTPUT,
@range2count int OUTPUT,
@range3count int OUTPUT,
@range4count int OUTPUT,
@range5count int OUTPUT
AS
declare
@myMin int,
@myMax int,
@myDif int
declare @myInfoTable table(
myMin integer,
myMax integer,
myLabel varchar(50),
myUnit varchar(50)
)
insert @myInfoTable (myMin,myMax,myLabel,myUnit) exec('SELECT Min(ConvertedValue) as myMin, Max(ConvertedValue) as myMax,unitDescriptionTrans as myLabel,unitUnitTransDescription as myUnit
FROM LenPropValsView where UnitDescriptionID='+@idMenu+' and xStoreID='+@myStore+@myFilters+' group by unitdescriptionTrans,unitUnitTransDescription')
select @myMin=myMin-1 from @myInfoTable
select @myMax=myMax+1 from @myInfoTable
select @myLabel=myLabel from @myInfoTable
select @myUnit=myUnit from @myInfoTable
set @myDif=(@myMax-@myMin)/5
set @range1=@myMin
set @range2=@myMin+@myDif
set @range3=@range2+@myDif
set @range4=@range3+@myDif
set @range5=@range4+@myDif
set @range6=@myMax
select @myLabel,@myUnit,@myMin,@myMax,@myDif,@range1,@range2,@range3,@range4,@range5,@range6
declare @myData table(
myRange varchar(50),
myCount integer
)
insert @myData(myRange,myCount)
exec ('select t.range as myRange, count(*) as myCount
from (
select case
when ConvertedValue <='+@range2+' then ''range1''
when ConvertedValue >'+@range2+' and ConvertedValue<='+@range3+' then ''range2''
when ConvertedValue >'+@range3+' and ConvertedValue<='+@range4+' then ''range3''
when ConvertedValue >'+@range4+' and ConvertedValue<='+@range5+' then ''range4''
else ''range5''
end as range
from LenPropValsView where unitDescriptionID='+@idmenu+' and xStoreID='+@myStore+@myFilters+') t
group by t.range order by t.range')
select @range1count=myCount from @myData where myRange='range1'
select @range2count=myCount from @myData where myRange='range2'
select @range3count=myCount from @myData where myRange='range3'
select @range4count=myCount from @myData where myRange='range4'
select @range5count=myCount from @myData where myRange='range5'
select @range1count,@range2count,@range3count,@range4count,@range5count
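The two EXEC calls and the intermediate table variables could also be collapsed into one statement, which avoids building and reading the table variables on every call. A sketch, ignoring the @myFilters fragment (which would still require dynamic SQL or sp_executesql) and assuming the same view and columns as above:
-- derive the bucket width from MIN/MAX and count rows per bucket in one statement
SELECT myRange, COUNT(*) AS myCount
FROM (
    SELECT CASE
               WHEN v.ConvertedValue <= b.myMin + 1 * b.myDif THEN 'range1'
               WHEN v.ConvertedValue <= b.myMin + 2 * b.myDif THEN 'range2'
               WHEN v.ConvertedValue <= b.myMin + 3 * b.myDif THEN 'range3'
               WHEN v.ConvertedValue <= b.myMin + 4 * b.myDif THEN 'range4'
               ELSE 'range5'
           END AS myRange
    FROM LenPropValsView AS v
    CROSS JOIN (
        SELECT MIN(ConvertedValue) - 1 AS myMin,
               ((MAX(ConvertedValue) + 1) - (MIN(ConvertedValue) - 1)) / 5.0 AS myDif
        FROM LenPropValsView
        WHERE UnitDescriptionID = @idMenu AND xStoreID = @myStore
    ) AS b
    WHERE v.UnitDescriptionID = @idMenu AND v.xStoreID = @myStore
) AS t
GROUP BY myRange
ORDER BY myRange;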
