Increment Numbers with Alphabets In SQL Server Table - sql-server

My Table structure is
id  type   no  amount
1   type1  a1  1000
2   type1  a2  2000
3   type2  b1  3000
4   type3  c1  4000
5   type1  a3  5000
6   type2  b2  6000
7   type2  b3  7000
8   type3  c2  8000
Now I want to increment the no field based on the type.
For example, for type1 the next no would be a4.
For numeric-only values I am using the following code:
SELECT ISNULL(Max(No),0)+1 AS No FROM table
but how do I do it when the values have alphabetic prefixes, in SQL Server 2005?

Assuming the prefixes are a single character long, you may try the following:
;with cte as (
    select type,
           typePrefix = left(no, 1),
           typeNum = cast(right(no, len(no) - 1) as int) -- cast so MAX compares numerically rather than as text
    from TableName
)
select typePrefix + cast(isnull(max(typeNum), 0) + 1 as varchar(10))
from cte
where type = 'type1'
group by typePrefix
But it will not work if you try to generate the next no for a type that is not yet in the table (e.g. 'type4'). To allow that, you may need a separate table where the prefix for each type is specified:
create table TypePrefixes (type varchar(50), prefix varchar(10))
insert into TypePrefixes values ('type1', 'a')
insert into TypePrefixes values ('type2', 'b')
insert into TypePrefixes values ('type3', 'c')
insert into TypePrefixes values ('another_type', 'd')
--etc.
In this case, the statement to get the next no will look like:
select tp.prefix + cast(isnull(max(cast(right(t.no, len(t.no) - len(tp.prefix)) as int)), 0) + 1 as varchar(20))
from TableName t
right join TypePrefixes tp on tp.type = t.type
where tp.type = 'type4'
group by tp.prefix
Also, you may just wish to calculate no for each record on the fly, like:
;with cte as (
select *,
typeNum = row_number() over (partition by type order by id),
typePrefix = char(dense_rank() over (order by type) + ascii('a') - 1)
from TableName
)
select *, No2 = typePrefix + cast(typeNum as varchar(10))
from cte
However, the latter approach is limited by the number of distinct types in your table, which must not exceed 26 (so that we do not go beyond 'z').

Try something like:
SELECT ISNULL(Max(No),0)+1 AS No FROM table group by type

First, you need a UNIQUE index on the No column:
CREATE UNIQUE INDEX IUN_MyTable_No
ON MySchema.MyTable(No);
GO
This unique index will prevent duplicate values and will also help the query below.
Second, you could use this script to generate the next No for a given letter:
DECLARE @Chr CHAR(1);
SET @Chr = 'A';

BEGIN TRY
    BEGIN TRANSACTION;

    DECLARE @LastId INT;
    DECLARE @NewNo VARCHAR(...); -- Fill with No's max. length

    -- Previous index will help this query
    SELECT @LastId = MAX(CONVERT(INT, SUBSTRING(x.No, 2, 8000)))
    FROM MySchema.MyTable x WITH(UPDLOCK) -- Locks the rows to prevent a concurrent session from generating the same value (No)
    WHERE x.No LIKE @Chr + '%';

    SET @NewNo = @Chr + CONVERT(VARCHAR(11), ISNULL(@LastId, 0) + 1);

    -- Do whatever you want with the new value: ex. INSERT
    INSERT INTO ... (No, ...)
    VALUES (@NewNo, ...);

    COMMIT TRANSACTION;
END TRY
BEGIN CATCH
    DECLARE @ErrMsg NVARCHAR(2000);
    SET @ErrMsg = ERROR_MESSAGE();

    IF @@TRANCOUNT > 0
    BEGIN
        ROLLBACK;
    END

    RAISERROR(@ErrMsg, 16, 1);
END CATCH
Note #1: This solution should be safe if this is the only way to generate the new values (@NewNo).
Note #2: If that SELECT query acquires at least 5000 locks then SQL Server will escalate locks at table/partition level.

Related

Allocate Unique Random case number SQL 2008

I have a list of teams in one table and a list of cases in another table. I have to allocate a unique random case number to each of the members in the team. What is the best way to generate a unique random case number for each team member? I have read about the NEWID() and CRYPT_GEN_RANDOM(4) functions. I tried using them but am not getting a unique number for each team member. Can someone please help me? Thanks for your time. I am using SQL 2008.
I have a 'Teams' table which has team members, their IDs (TM1, TM2, etc.) and their names.
I have another 'Cases' table which has ID numbers like 1, 2, 3, 4, etc. I want to allocate a random case to each team member. The desired output should be as below.
Team member   Random_case_allocated
TM1           3
TM2           5
TM3           7
TM4           2
TM5           8
TM6           6
I have tried
SELECT TOP 1 id FROM cases
ORDER BY CRYPT_GEN_RANDOM(4)
It is giving the same id for all team members. I want a different case id for each team member. Can someone please help. Thank you.
The TOP (1) ... ORDER BY NEWID() approach will not work the way you are trying to use it here. TOP tells the query engine you are only interested in the first record of the result set. You need NEWID() to be evaluated for each record, which you can force inside a window function such as ROW_NUMBER(). This could probably be optimized; it is what I could come up with off the top of my head. Please note, this is not a truly random algorithm.
UPDATED With Previous Case Exclusions
DECLARE @User TABLE (UserID INT)
DECLARE @Case TABLE (CaseID INT)
DECLARE @UserCase TABLE (UserID INT, CaseID INT, DateAssigned DATETIME)

DECLARE @CaseCount INT = 10
DECLARE @SaveCaseID INT = @CaseCount
DECLARE @UserCount INT = 100
DECLARE @NumberOfUserAllocatedAtStart INT = 85

WHILE (@CaseCount > 0) BEGIN
    INSERT @Case VALUES (@CaseCount)
    SET @CaseCount = @CaseCount - 1
END

DECLARE @RandomCaseID INT

WHILE (@UserCount > 0) BEGIN
    INSERT @User VALUES (@UserCount)
    SET @UserCount = @UserCount - 1

    IF (@NumberOfUserAllocatedAtStart > 0) BEGIN
        SET @RandomCaseID = (ABS(CHECKSUM(NEWID())) % (@SaveCaseID)) + 1
        INSERT @UserCase SELECT @UserCount, @RandomCaseID, DATEADD(MONTH, -3, GETDATE())

        SET @RandomCaseID = (ABS(CHECKSUM(NEWID())) % (@SaveCaseID)) + 1
        INSERT @UserCase SELECT @UserCount, @RandomCaseID, DATEADD(MONTH, -5, GETDATE())

        SET @RandomCaseID = (ABS(CHECKSUM(NEWID())) % (@SaveCaseID)) + 1
        INSERT @UserCase SELECT @UserCount, @RandomCaseID, DATEADD(MONTH, -2, GETDATE())

        SET @NumberOfUserAllocatedAtStart = @NumberOfUserAllocatedAtStart - 1
    END
END
;WITH RowNumberWithNewID AS
(
    SELECT
        U.UserID, C.CaseID, UserCase_CaseID = UC.CaseID,
        RowNumber = ROW_NUMBER() OVER (PARTITION BY U.UserID ORDER BY NEWID())
    FROM
        @User U
        INNER JOIN @Case C ON 1 = 1
        LEFT OUTER JOIN @UserCase UC ON UC.UserID = U.UserID AND UC.CaseID = C.CaseID AND UC.DateAssigned > DATEADD(MONTH, -4, UC.DateAssigned)
    WHERE
        UC.CaseID IS NULL OR UC.CaseID <> C.CaseID
)
SELECT
    UserID,
    CaseID,
    PreviousCases = STUFF((SELECT ', ' + CONVERT(NVARCHAR(10), UC.CaseID) FROM @UserCase UC WHERE UC.UserID = RN.UserID FOR XML PATH('')), 1, 1, '')
FROM RowNumberWithNewID RN
WHERE
    RN.RowNumber = 1

Performance issue with larger resultsets MSSQL

I currently have a stored procedure in MSSQL where I execute a SELECT-statement multiple times based on the variables I give the stored procedure. The stored procedure counts how many results are going to be returned for every filter a user can enable.
The stored procedure isn't the issue; I transformed the SELECT statement from the stored procedure into a regular SELECT statement, which looks like:
DECLARE @contentRootId int = 900589
DECLARE @RealtorIdList varchar(2000) = ';880;884;1000;881;885;'
DECLARE @publishSoldOrRentedSinceDate int = 8
DECLARE @isForSale BIT = 1
DECLARE @isForRent BIT = 0
DECLARE @isResidential BIT = 1
--...(another 55 variables)...

--Table to be returned
DECLARE @resultTable TABLE
(
    variableName varchar(100),
    [value] varchar(200)
)

-- Create table based on input variable. Example: turns ';18;118;' into a table containing the two ints 18 AND 118
DECLARE @RealtorIdTable table (RealtorId int)
INSERT INTO @RealtorIdTable SELECT * FROM dbo.Split(@RealtorIdList, ';') option (maxrecursion 150)

INSERT INTO @resultTable ([value], variableName)
SELECT [Value], VariableName FROM (
    SELECT count(*) as TotalCount,
        ISNULL(SUM(CASE WHEN reps.ForRecreation = 1 THEN 1 ELSE 0 END), 0) as ForRecreation,
        ISNULL(SUM(CASE WHEN reps.IsQualifiedForSeniors = 1 THEN 1 ELSE 0 END), 0) as IsQualifiedForSeniors,
        --...(A whole bunch more SUM(CASE)...
    FROM TABLE1 reps
    LEFT JOIN temp t on
        t.ContentRootID = @contentRootId
        AND t.RealEstatePropertyID = reps.ID
    WHERE
        (EXISTS (select 1 from @RealtorIdTable where RealtorId = reps.RealtorID))
        AND (@SelectedGroupIds IS NULL OR EXISTS (select 1 from @SelectedGroupIdtable where GroupId = t.RealEstatePropertyGroupID))
        AND (ISNULL(reps.IsForSale, 0) = ISNULL(@isForSale, 0))
        AND (ISNULL(reps.IsForRent, 0) = ISNULL(@isForRent, 0))
        AND (ISNULL(reps.IsResidential, 0) = ISNULL(@isResidential, 0))
        AND (ISNULL(reps.IsCommercial, 0) = ISNULL(@isCommercial, 0))
        AND (ISNULL(reps.IsInvestment, 0) = ISNULL(@isInvestment, 0))
        AND (ISNULL(reps.IsAgricultural, 0) = ISNULL(@isAgricultural, 0))
        --...(Around 50 more of these WHERE-statements)...
) as tbl
UNPIVOT (
    [Value]
    FOR [VariableName] IN (
        [TotalCount],
        [ForRecreation],
        [IsQualifiedForSeniors],
        --...(All the other things I selected in above query)...
    )
) as d

select * from @resultTable
The combination of a realtor and content ID gives me a default set of X records. When I choose a combination that gives me ~4600 records, the execution time is around 250 ms. When I execute the statement with a combination that gives me ~600 records, the execution time is about 20 ms.
I would like to know why this is happening. I tried removing all the SUM(CASE ...) expressions from the SELECT, I tried removing almost everything from the WHERE clause, and I tried removing the JOIN, but I keep seeing the huge difference between the result sets of 4600 and 600 records.
Table variables can perform worse when the number of records is large. Consider using a temporary table instead (a rough sketch follows). See: When should I use a table variable vs temporary table in sql server?
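As a sketch only (reusing the names from the question; dbo.Split is the asker's own function, kept as-is), the table variables could be swapped for temp tables like this:
-- Temp tables get statistics, so the optimizer can estimate row counts
-- instead of assuming a table variable holds only a handful of rows.
CREATE TABLE #resultTable
(
    variableName varchar(100),
    [value] varchar(200)
)

CREATE TABLE #RealtorIdTable (RealtorId int)

INSERT INTO #RealtorIdTable
SELECT * FROM dbo.Split(@RealtorIdList, ';') option (maxrecursion 150)

-- ...the rest of the statement stays the same, just referencing
-- #resultTable and #RealtorIdTable instead of the table variables...

DROP TABLE #RealtorIdTable
DROP TABLE #resultTable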
Also, consider replacing the UNPIVOT with alternative SQL code; writing it out yourself gives you more control and may even increase performance (one possible shape is sketched below). See, for example: PIVOT, UNPIVOT and performance
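One possible UNPIVOT-free shape (again a sketch reusing the question's names, with the repeated columns and filters elided): compute the wide row of counters once into a temp table, then emit one row per counter with plain SELECT ... UNION ALL:
SELECT count(*) as TotalCount,
    ISNULL(SUM(CASE WHEN reps.ForRecreation = 1 THEN 1 ELSE 0 END), 0) as ForRecreation
    --...(the remaining counters)...
INTO #counts
FROM TABLE1 reps
LEFT JOIN temp t on t.ContentRootID = @contentRootId AND t.RealEstatePropertyID = reps.ID
WHERE ... --(same filters as above)

-- manual "unpivot": one short SELECT per counter against the single row in #counts
SELECT 'TotalCount' as variableName, CONVERT(varchar(200), TotalCount) as [value] FROM #counts
UNION ALL
SELECT 'ForRecreation', CONVERT(varchar(200), ForRecreation) FROM #counts
--...(one SELECT per remaining counter)...

DROP TABLE #counts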

With SQL Server, How can I query a table based on a delimited string as the criteria?

I have the following tables:
tbl_File:
FileID | Filename
-----------------
1 | test.jpg
and
tbl_Tag:
TagID | TagName
---------------
1 | Red
and
tbl_TagFile:
ID | TagID | FileID
-------------------
1 | 1 | 1
I need to pass a non-inclusive query against these tables. For example, imagine a list of checkboxes to select one or more tags, and then a search button. I need to pass the TagID's to the query as a PIPE delimited string, such as "1|2|5|"
The search results need to be restrictive, in that a file must meet all the criteria: if 3 tags are selected, the results should be only the files that have all 3 tags associated with them.
I think I've made this too complicated, but tried iterating over the tags using charindex and stuff to work my way through the string, but it seems there must be an easier way.
I'd like to do this as a function... Such as
SELECT FileID, Filename
FROM tbl_Files
WHERE dbo.udf_FileExistswithTags(@Tags, FileID) = 1
Any efficient way to do this?
It doesn't sound from your example scenario that the actual "need" is to pass a pipe-delimited string. I would highly suggest abandoning that idea and using a table-valued parameter (TVP) in your stored procedure. This has numerous advantages: you will not hit a data type limit or a "number of parameters" limit that might occur with very large sets of criteria, and it avoids any need to run a (potentially very slow) UDF.
Split the string into tokens on the application side, and then insert each token as a row in the TVP. Example below:
Create the TVP type in your database:
CREATE TYPE [dbo].[FileNameType] AS TABLE
(
fileName varchar(1000)
)
On the application side, build your list of filename tokens into a recordset:
private static List<SqlDataRecord> BuildFileNameTokenRecords(IEnumerable<string> tokens)
{
    var records = new List<SqlDataRecord>();
    foreach (string token in tokens)
    {
        var record = new SqlDataRecord(
            new SqlMetaData[]
            {
                new SqlMetaData("fileName", SqlDbType.VarChar, 1000),
            }
        );
        record.SetString(0, token); // store the token in the record so it reaches the TVP
        records.Add(record);
    }
    return records;
}
Wherever you run your proc from (rough code here):
var records = BuildFileNameTokenRecords(listofstrings);
var sqlCmd = sqlDb.GetStoredProcCommand("FileExists");
sqlDb.AddInParameter(sqlCmd, "tvpFilenameTokens", SqlDbType.Structured, records);
ExecuteNonQuery(sqlCmd);
Filtering your select statement then simply becomes a matter of joining on the tokens in the table parameter. Something like this:
CREATE PROCEDURE dbo.FileExists
(
    -- Put additional parameters here
    @tvpFilenameTokens dbo.FileNameType READONLY
)
AS
BEGIN
    SELECT f.FileID, f.Filename
    FROM tbl_Files f
    INNER JOIN @tvpFilenameTokens tvp
        ON f.FileID = tvp.fileName
END
Here is an option that should scale. All of the functionality is available back to SQL Server 2005. It uses a CTE to separate the portion of the query that finds only the FileIDs that have all of the TagIDs passed in, and then that list of FileIDs is joined to the [File] table to get the details. It also uses an INNER JOIN instead of an IN list to match the TagID's.
Please note that the example below uses a SQLCLR splitter that is freely available in the SQL# library (which I wrote, but this function is in the Free version). The specific splitter used is not the important part; it should just be one that is either SQLCLR, an inline tally table (like the one used in @wewesthemenace's answer), or the XML method. Just don't use a splitter based on a WHILE loop or a recursive CTE.
---- TEST SETUP
DECLARE @File TABLE
(
    FileID INT NOT NULL PRIMARY KEY,
    [Filename] NVARCHAR(200) NOT NULL
);

DECLARE @TagFile TABLE
(
    TagID INT NOT NULL,
    FileID INT NOT NULL,
    PRIMARY KEY (TagID, FileID)
);

INSERT INTO @File VALUES (1, 'File1.txt');
INSERT INTO @File VALUES (2, 'File2.txt');
INSERT INTO @File VALUES (3, 'File3.txt');

INSERT INTO @TagFile VALUES (1, 1);
INSERT INTO @TagFile VALUES (2, 1);
INSERT INTO @TagFile VALUES (5, 1);
INSERT INTO @TagFile VALUES (1, 2);
INSERT INTO @TagFile VALUES (2, 2);
INSERT INTO @TagFile VALUES (4, 2);
INSERT INTO @TagFile VALUES (1, 3);
INSERT INTO @TagFile VALUES (2, 3);
INSERT INTO @TagFile VALUES (5, 3);
INSERT INTO @TagFile VALUES (6, 3);
---- DONE WITH TEST SETUP

DECLARE @TagsToGet VARCHAR(100); -- this would be the proc input parameter
SET @TagsToGet = '1|2|5';

CREATE TABLE #Tags (TagID INT NOT NULL PRIMARY KEY);
DECLARE @NumTags INT;

INSERT INTO #Tags (TagID)
    SELECT split.SplitVal
    FROM   SQL#.String_Split4k(@TagsToGet, '|', 1) split;
SET @NumTags = @@ROWCOUNT;

;WITH files AS
(
    SELECT   tf.FileID
    FROM     @TagFile tf
             INNER JOIN #Tags tg
                     ON tg.TagID = tf.TagID
    GROUP BY tf.FileID
    HAVING   COUNT(*) = @NumTags
)
SELECT fl.*
FROM   @File fl
       INNER JOIN files
               ON files.FileID = fl.FileID
ORDER BY fl.[Filename] ASC;

DROP TABLE #Tags; -- don't need this if code above is placed in a proc
Results:
FileID Filename
1 File1.txt
3 File3.txt
Notes
As much as I love TVPs (and I do, when they are done correctly and used appropriately), I would say that they are a bit much for this type of small scale, single dimensional array scenario. There won't really be any performance gain over using a SQLCLR streaming TVF string splitter but it would require more app code and the additional User-Defined Table Type, which can't be updated without first dropping all procs that reference it. That doesn't happen all of the time, but needs to be considered in terms of long-term maintenance costs.
The JOIN between TagFile and the temporary table populated from the split operation should be much more efficient than using an IN list with a subquery for the split operation (that alternative is sketched after these notes). An IN list is shorthand for all of the values in it being their own OR conditions, whereas the JOIN is a fully set-based approach that lets the Query Optimizer do its thang.
The structure I used for the test @TagFile table only has the two relevant IDs in it: TagID and FileID. It does not have the ID field that I assume is an IDENTITY field on this table. Unless there is a very specific reason for needing that IDENTITY field, I would suggest removing it. It adds no inherent benefit, as the combination of TagID and FileID is a natural key (i.e. it is both NOT NULL and unique). And if the clustered PK of this table were simply those two fields, the JOIN to the temp table of the split-out TagIDs would be quite fast, even with millions of rows in TagFile.
One reason that this approach works so much better than trying to handle this via a function per FileID (outside of the obvious set-based is better than cursor-based reason) is that the list of TagIDs is the same for all files to be checked. So splitting that out more than one time is a waste of effort.
By not splitting the TagID list inline in the query I am able to capture the number of elements in that list with no additional effort. Hence this saves from needing to do a secondary calculation.
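For comparison, the IN-list form that the note above advises against would look roughly like this (a sketch only, using the same splitter and variables as the example above):
SELECT   tf.FileID
FROM     @TagFile tf
WHERE    tf.TagID IN (SELECT split.SplitVal
                      FROM   SQL#.String_Split4k(@TagsToGet, '|', 1) split)
GROUP BY tf.FileID
HAVING   COUNT(*) = @NumTags;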
Here is a function called DelimitedSplit8K by Jeff Moden. This is used to split strings of length up to 8000. For more info, read this: http://www.sqlservercentral.com/articles/Tally+Table/72993/
CREATE FUNCTION [dbo].[DelimitedSplit8K]
(
    @pString VARCHAR(8000), --WARNING!!! DO NOT USE MAX DATA-TYPES HERE! IT WILL KILL PERFORMANCE!
    @pDelimiter CHAR(1)
)
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
    WITH E1(N) AS (--10E+1 or 10 rows
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
        SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
    ),
    E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
    E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
    cteTally(N) AS (
        SELECT TOP (ISNULL(DATALENGTH(@pString), 0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
    ),
    cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
        SELECT 1 UNION ALL
        SELECT t.N + 1 FROM cteTally t WHERE SUBSTRING(@pString, t.N, 1) = @pDelimiter
    ),
    cteLen(N1, L1) AS (--==== Return start and length (for use in substring)
        SELECT
            s.N1,
            ISNULL(NULLIF(CHARINDEX(@pDelimiter, @pString, s.N1), 0) - s.N1, 8000)
        FROM cteStart s
    )
    --===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
    SELECT
        ItemNumber = ROW_NUMBER() OVER (ORDER BY l.N1),
        Item = SUBSTRING(@pString, l.N1, l.L1)
    FROM cteLen l
Your query would now be:
DECLARE @pString VARCHAR(8000) = '1|3|5'

SELECT
    f.*
FROM tbl_File f
INNER JOIN tbl_TagFile tf ON tf.FileID = f.FileID
WHERE
    tf.TagID IN (SELECT CAST(item AS INT) FROM dbo.DelimitedSplit8K(@pString, '|'))
GROUP BY f.FileID, f.FileName
HAVING COUNT(tf.ID) = (LEN(@pString) - LEN(REPLACE(@pString, '|', '')) + 1)
The expression below counts the number of TagIDs in the parameter by counting the occurrences of the delimiter | and adding 1. For example, with @pString = '1|3|5' it evaluates to 5 - 3 + 1 = 3.
(LEN(@pString) - LEN(REPLACE(@pString, '|', '')) + 1)
Here is an option that does not require UDFs.
It can be argued that this is also complicated.
DECLARE @TagList VARCHAR(50)

-- pass in this
SET @TagList = '1|3|6'

SELECT
    FinalSet.FileID,
    FinalSet.Tag,
    FinalSet.TotalMatches
FROM
(
    SELECT
        tbl_TagFile.FileID,
        tbl_TagFile.Tag,
        COUNT(*) OVER (PARTITION BY tbl_TagFile.FileID) TotalMatches
    FROM
    (
        SELECT 1 FileID, '1' Tag UNION ALL
        SELECT 1, '2' UNION ALL
        SELECT 1, '3' UNION ALL
        SELECT 1, '6' UNION ALL
        SELECT 2, '1' UNION ALL
        SELECT 2, '3'
    ) tbl_TagFile
    INNER JOIN
    (
        SELECT tbl_Tag.Tag
        FROM
        (
            SELECT '1' Tag UNION ALL
            SELECT '2' UNION ALL
            SELECT '3' UNION ALL
            SELECT '4' UNION ALL
            SELECT '5' UNION ALL
            SELECT '6'
        ) tbl_Tag
        WHERE '|' + @TagList + '|' LIKE '%|' + Tag + '|%'
    ) LimitedTagTable
        ON LimitedTagTable.Tag = tbl_TagFile.Tag
) FinalSet
WHERE
    FinalSet.TotalMatches = (LEN(@TagList) - LEN(REPLACE(@TagList, '|', '')) + 1)
There are some complications in this around data types and indexes, but you can see the concept: you only get the records that match the string you passed in.
The subquery LimitedTagTable is your tag list filtered by your input pipe-delimited string.
The subquery FinalSet joins that limited tag list to your list of files.
The column TotalMatches works out how many tag matches each file had.
Finally this line limits the output to those files that had enough matches:
FinalSet.TotalMatches = (LEN(@TagList) - LEN(REPLACE(@TagList, '|', '')) + 1)
Please experiment with different inputs and datasets and see if it suits as I have made a number of assumptions.
I'm answering my own question, in hopes that someone can let me know if/how flawed it is. So far it seems to be working, but this is only early testing.
Function:
ALTER FUNCTION [dbo].[udf_FileExistsByTags]
(
    @FileID int,
    @Tags nvarchar(max)
)
RETURNS bit
AS
BEGIN
    DECLARE @Exists bit = 0
    DECLARE @Count int = 0
    DECLARE @TagTable TABLE (FileID int, TagID int)
    DECLARE @Tag int

    WHILE len(@Tags) > 0
    BEGIN
        SET @Tag = CAST(LEFT(@Tags, charindex('|', @Tags + '|') - 1) as int)
        SET @Count = @Count + 1

        IF EXISTS (SELECT * FROM tbl_FileTag WHERE FileID = @FileID AND TagID = @Tag)
        BEGIN
            INSERT INTO @TagTable (FileID, TagID) VALUES (@FileID, @Tag)
        END

        SET @Tags = STUFF(@Tags, 1, charindex('|', @Tags + '|'), '')
    END

    SET @Exists = CASE WHEN @Count = (SELECT COUNT(*) FROM @TagTable) THEN 1 ELSE 0 END

    RETURN @Exists
END
Then in the query:
SELECT * FROM tbl_File a WHERE dbo.udf_FileExistsByTags(a.FileID, @Tags) = 1
So now I'm looking for errors.
What do you think? It is probably not very efficient; however, this search will be used only on a periodic basis.

TSQL: How do I combine two values (money/int) using UNION ALL without changing the data?

I have the following Query:
create table #Result (Reward varchar(40), Value MONEY);
insert #Result exec GetCurrentCycleQualifierStatusByAccountId @AccountId=76011;
with cteFirstResults as
(select Reward, round(Value,2) as Value from #Result where Reward like '%Balance%'),
cteSecondResults as
(select Reward, convert(INTEGER, Value) as Value from #Result where Reward NOT like '%Balance%')
select * from cteFirstResults
UNION ALL
select * from cteSecondResults;
drop table #Result;
When running a select * individually against each "cte" table, I get the results I want.
But when run all together, I get something like:
Reward         Value
------         -----
Daily Balance  4709.00
Value A        1.00
Value B        9.00
I want the Value A/Value B data to show without any decimal values as they do when running a select against the table directly. How do I combine the two queries into one to show this data correctly?
Round(value,0) does nothing.
I can not change the sproc from which I'm gathering the data, but I can make the temp table any way I like.
Thanks,
Jason
The solution:
create table #Result (Reward varchar(40), Value MONEY);
insert #Result exec GetCurrentCycleQualifierStatusByAccountId @AccountId=76011;
With cteFirstResults as
(
Select Reward, Value
From #Result
Where Reward like '%Balance%'
)
, cteSecondResults as
(
Select Reward, cast(Value as int) as Value
From #Result
Where Reward Not like '%Balance%'
)
Select Reward, Cast( Value As varchar(max)) As Value
From cteFirstResults
Union All
Select Reward, Cast( value As varchar(max)) as Value
From cteSecondResults;
drop table #Result;
The problem is that the integers are being implicitly cast, because both branches of a UNION must end up with a single column data type, and the money/decimal type wins.
If you just want the values displayed, cast them both to strings.
CREATE TABLE #test
(
test decimal(9,2)
)
CREATE TABLE #test2
(
test int
)
INSERT INTO #test (test)
SELECT 1.25 UNION ALL
SELECT 172813.99
INSERT INTO #test2 (test)
SELECT 134 UNION ALL
SELECT 41
SELECT CAST(test as varchar(max)) FROM #Test
UNION ALL
SELECT CAST(test as varchar(max)) FROM #Test2
Results:
1.25
172813.99
134
41
In a UNION, SQL Server has to settle on a single data type for each result column and will convert values where it can. You will have to beat it at its own game and do your own conversion.
In the final select (the one with the UNION), massage the data in both cases into a string; format the output as desired before converting it to a string.
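For example, a minimal sketch against the #Result temp table from the question (the varchar lengths are illustrative):
SELECT Reward, CONVERT(varchar(20), Value) AS Value               -- money keeps its two decimals, e.g. 4709.00
FROM #Result
WHERE Reward LIKE '%Balance%'
UNION ALL
SELECT Reward, CONVERT(varchar(20), CAST(Value AS int)) AS Value  -- whole numbers for the rest, e.g. 1
FROM #Result
WHERE Reward NOT LIKE '%Balance%';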
Each SQL Server column can have only one data type.
round(money,4) returns a money(4)*
convert(int) returns an int
Based on data type precedence
#13. money
#16. int
The resultant column is money(4). Therefore ALL values in the column will be formatted using money(4).
Your options are:
convert(float) across both - downside: a value of 1.1 is shown as 1.1, not 1.10 (see the sketch after this list)
convert(varchar) - you have stated you don't want this, and it changes the data type seen by the receiving program
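A quick sketch of the convert(float) option against the question's #Result table (illustrative only):
SELECT Reward, CONVERT(float, Value) AS Value
FROM #Result
WHERE Reward LIKE '%Balance%'
UNION ALL
SELECT Reward, CONVERT(float, CAST(Value AS int)) AS Value
FROM #Result
WHERE Reward NOT LIKE '%Balance%';
-- downside as noted above: 4709.00 comes back as 4709 and 1.10 as 1.1, since float carries no display scale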
FWIW
Round(value,0) does nothing.
It does do something. It burns CPU rounding an int value to another int value (of the same value). Incidentally, the resultant type is (still) "int". This has nothing to do with formatting.
REF:
declare @m money
set @m = 12.3233

select SQL_VARIANT_PROPERTY(round(@m,2), 'basetype')  -- money
select SQL_VARIANT_PROPERTY(round(@m,2), 'precision') -- 19
select SQL_VARIANT_PROPERTY(round(@m,2), 'scale')     -- 4

How to split and insert CSV data into a new table in a single statement?

I have a table named "Documents" containing a column as below:
DocumentID
I have data in the format: @DocID = 1,2,3,4
How do I insert these DocumentIDs as separate rows using a single query?
You need a way to split and process the string in TSQL; there are many ways to do this. This article covers the pros and cons of just about every method:
Arrays and Lists in SQL Server 2005 and Beyond
You need to create a split function. This is how a split function can be used:
SELECT
*
FROM YourTable y
INNER JOIN dbo.yourSplitFunction(@Parameter) s ON y.ID = s.Value
I prefer the number table approach to splitting a string in TSQL (Using a Table of Numbers), but there are numerous ways to split strings in SQL Server; see the previous link, which explains the pros and cons of each.
For the Numbers table method to work, you need to do this one-time table setup, which will create a table Numbers containing rows from 1 to 10,000:
SELECT TOP 10000 IDENTITY(int,1,1) AS Number
INTO Numbers
FROM sys.objects s1
CROSS JOIN sys.objects s2
ALTER TABLE Numbers ADD CONSTRAINT PK_Numbers PRIMARY KEY CLUSTERED (Number)
Once the Numbers table is set up, create this split function:
CREATE FUNCTION inline_split_me (@SplitOn char(1), @param varchar(7998)) RETURNS TABLE AS
RETURN (SELECT substring(@SplitOn + @param + ',', Number + 1,
                         charindex(@SplitOn, @SplitOn + @param + @SplitOn, Number + 1) - Number - 1)
               AS Value
        FROM Numbers
        WHERE Number <= len(@SplitOn + @param + @SplitOn) - 1
          AND substring(@SplitOn + @param + @SplitOn, Number, 1) = @SplitOn)
GO
You can now easily split a CSV string into a table and join on it:
select * from dbo.inline_split_me(';','1;22;333;4444;;') where LEN(Value)>0
OUTPUT:
Value
----------------------
1
22
333
4444
(4 row(s) affected)
To make your new table, use this:
--set up tables:
DECLARE @Documents table (DocumentID varchar(500), SomeValue varchar(5))
INSERT @Documents VALUES ('1,2,3,4', 'AAA')
INSERT @Documents VALUES ('5,6', 'BBBB')

DECLARE @NewDocuments table (DocumentID int, SomeValue varchar(5))

--populate NewDocuments
INSERT @NewDocuments
    (DocumentID, SomeValue)
SELECT
    c.Value, a.SomeValue
FROM @Documents a
    CROSS APPLY dbo.inline_split_me(',', a.DocumentID) c

--show NewDocuments contents:
select * from @NewDocuments
OUTPUT:
DocumentID SomeValue
----------- ---------
1 AAA
2 AAA
3 AAA
4 AAA
5 BBBB
6 BBBB
(6 row(s) affected)
If you don't want to create a Numbers table and are running SQL Server 2005 and up, you can just use this split function (no Numbers table required):
CREATE FUNCTION inline_split_me (@SplitOn char(1), @String varchar(7998))
RETURNS TABLE AS
RETURN (WITH SplitString AS
        (SELECT
             LEFT(@String, CHARINDEX(@SplitOn, @String) - 1) AS Part,
             RIGHT(@String, LEN(@String) - CHARINDEX(@SplitOn, @String)) AS Remainder
         WHERE @String IS NOT NULL AND CHARINDEX(@SplitOn, @String) > 0
         UNION ALL
         SELECT
             LEFT(Remainder, CHARINDEX(@SplitOn, Remainder) - 1),
             RIGHT(Remainder, LEN(Remainder) - CHARINDEX(@SplitOn, Remainder))
         FROM SplitString
         WHERE Remainder IS NOT NULL AND CHARINDEX(@SplitOn, Remainder) > 0
         UNION ALL
         SELECT
             Remainder, null
         FROM SplitString
         WHERE Remainder IS NOT NULL AND CHARINDEX(@SplitOn, Remainder) = 0
        )
        SELECT Part FROM SplitString
       )
GO
+1 for KM's thorough explanation. This will get the job done quickly, but maybe not the most efficiently (again, see KM's response for all the options).
My quick response:
Install SQL# (it's free and very useful)
Then
INSERT INTO Documents (documentId)
SELECT SplitVal FROM SQL#.String_Split(@DocId, ',', 1)
