SQL Server index options (WITH) using a variable

I would like to create an index and make its WITH options configurable. As an example I will take SORT_IN_TEMPDB:
DECLARE @On NUMERIC(10,2) = 1
CREATE NONCLUSTERED INDEX NCI_TN
ON Table_Name (Student_ID)
WITH (SORT_IN_TEMPDB = @On)
But it throws an error saying the expected value is Numeric, Integer, ON, or OFF. Is it possible to set an option's value from a variable?

In SQL Server, DDL cannot be parameterized, and even in statements that can be parameterized, object identifiers and keywords can't be.
You'll need to use dynamic SQL, e.g.
DECLARE @On NUMERIC(10,2) = 1
DECLARE @sql NVARCHAR(MAX) = CONCAT(N'
CREATE NONCLUSTERED INDEX NCI_TN
ON Table_Name (Student_ID)
WITH (SORT_IN_TEMPDB = ', CASE WHEN @On = 1 THEN 'ON' ELSE 'OFF' END, ')')
PRINT @sql
EXEC (@sql)
Without dynamic SQL you can use something like
DECLARE @On NUMERIC(10,2) = 1
IF @On = 1
BEGIN
CREATE NONCLUSTERED INDEX NCI_TN
ON Table_Name (Student_ID)
WITH (SORT_IN_TEMPDB = ON)
END
ELSE
BEGIN
CREATE NONCLUSTERED INDEX NCI_TN
ON Table_Name (Student_ID)
WITH (SORT_IN_TEMPDB = OFF)
END

Related

Procedure to fix tables built as CCI to CI

Our platform team has asked our team to refrain from using unnecessary Clustered Columnstore indexes.
I'm trying to create a proc that we can use to pass in the table and the index column.
I'm getting an error when trying to create it:
"Parse error at line: 25, column: 14: Incorrect syntax near '@A'"
I don't see an issue; what am I missing?
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE PROC [lab03].[Sproc_TableCCItoCI] @Table [NVARCHAR](150), @TheColumn [NVARCHAR](150)
AS
-- exec lab03.Sproc_TableCCItoCI @param1 = '', @param2 = ''
-- ==========================================================================================================================================================================
-- File Name:
--
-- Purpose: Fix tables built using clustered - columnstore
--
-- Version Date Changed by Description
-- ------- ---------- ------------------- -------------------------------------------
-- 1.0 2021-07-22 Sxxx Move to prod
--
-- ==========================================================================================================================================================================
-- converting Clustered Columnstore(CCI) index to Clustered Index (CI)
DECLARE @A Varchar(6)
SET @A = 'lab16.'
CREATE TABLE @A + @Table + '_convert'
WITH (
DISTRIBUTION = HASH (@TheColumn),
CLUSTERED INDEX (@TheColumn)
)
AS
SELECT
*
FROM @A + @Table
--save current table just in case, you’ll drop this as soon as process is complete
RENAME OBJECT @A + @Table TO @Table + '_Hold'
--renames new table to the existing name
RENAME OBJECT @A + @Table + '_convert' TO @Table;
--validate if desired then drop the hold table
DROP TABLE @A + @Table +'_Hold';
GO
I think you need to understand dynamic SQL better and I can highly recommend Erland Sommarskog's excellent article 'The Curse and Blessings of Dynamic SQL'.
I've adapted an example from our Synapse db which is doing something similar; it shows a method of parameterising dynamic SQL plus doing some error checking, which is worthwhile in Synapse when things go wrong. Dedicated SQL Pools do not currently support the RETURN statement, so a proc just kind of ploughs on when errors occur; that's why it's good to collect as much info as possible through the error messages. See what you think of this:
IF OBJECT_ID('dbo.usp_convertTableToCI') IS NOT NULL
DROP PROC dbo.usp_convertTableToCI;
GO
CREATE PROC dbo.usp_convertTableToCI
@schemaName SYSNAME,
@tableName SYSNAME,
@columnName SYSNAME,
@debug_yn BIT
AS
DECLARE @sql NVARCHAR(MAX);
SET NOCOUNT ON;
-- Check schema exists
IF NOT EXISTS ( SELECT * FROM sys.schemas WHERE [name] = @schemaName )
RAISERROR( 'Source schema [%s] does not exist.', 16, 1, @schemaName );
-- Check table exists
IF NOT EXISTS ( SELECT * FROM sys.tables WHERE SCHEMA_NAME(schema_id) = @schemaName AND [name] = @tableName )
RAISERROR( 'Source table [%s].[%s] does not exist.', 16, 1, @schemaName, @tableName );
-- Check column exists
IF NOT EXISTS ( SELECT * FROM sys.columns WHERE OBJECT_SCHEMA_NAME(object_id) = @schemaName AND OBJECT_NAME(object_id) = @tableName AND [name] = @columnName )
RAISERROR( 'Column [%s] does not exist in table [%s].[%s].', 16, 1, @columnName, @schemaName, @tableName );
-- Assemble the dynamic SQL to swap the table over to clustered index
SET @sql = 'CREATE TABLE @schemaName.@tableName_convert
WITH (
DISTRIBUTION = HASH ( @columnName ),
CLUSTERED INDEX ( @columnName )
)
AS
SELECT *
FROM @schemaName.@tableName;
-- Save current table just in case, you’ll drop this as soon as process is complete
RENAME OBJECT @schemaName.@tableName TO @tableName_Hold
-- Renames new table to the existing name
RENAME OBJECT @schemaName.@tableName_convert TO @tableName;
-- Validate if desired then drop the hold table
--DROP TABLE @schemaName.@tableName_Hold;'
-- Replace the variable names
SET @sql = REPLACE(
REPLACE(
REPLACE( @sql, '@schemaName', @schemaName ),
'@tableName', @tableName ),
'@columnName', @columnName )
IF @debug_yn = 1
PRINT @sql;
ELSE
BEGIN
PRINT @sql;
EXEC(@sql);
END
GO
I would strongly suggest giving it some thorough tests for your scenarios.
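For example, a quick smoke test in debug mode (the schema, table, and column names below are just placeholders, not from the original post) only prints the generated statements:
-- With @debug_yn = 1 the proc prints the generated SQL without executing it;
-- rerun with @debug_yn = 0 to actually perform the conversion.
EXEC dbo.usp_convertTableToCI
    @schemaName = N'lab16',
    @tableName  = N'StudentScores',
    @columnName = N'Student_ID',
    @debug_yn   = 1;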

SQL Server delete all tables under a specific schema with temporal tables

I'm trying to delete a database schema that contains temporal tables.
None of the existing scripts I found through googling support temporal tables.
Has anyone already done this?
There are many temporal tables in that schema, with many constraints and dependencies, so when I try to drop the schema it complains about the dependencies.
Basically I'm looking for a stored procedure or something that goes through all the DB objects and removes them one by one.
Script to Create sample tables
USE [master];
GO
CREATE DATABASE [TestDb];
GO
USE [TestDb];
GO
CREATE SCHEMA [TestScheme];
GO
SET ANSI_NULLS ON;
GO
SET QUOTED_IDENTIFIER ON;
GO
CREATE TABLE [TestScheme].[Country]
(
[CountryCode] [char](2) NOT NULL,
[Country] [varchar](60) NOT NULL,
[ValidFrom] [datetime2](2) GENERATED ALWAYS AS ROW START NOT NULL,
[ValidTo] [datetime2](2) GENERATED ALWAYS AS ROW END NOT NULL,
CONSTRAINT [PK_TestScheme_Country_CountryCode]
PRIMARY KEY CLUSTERED([CountryCode] ASC)
WITH (PAD_INDEX = ON, STATISTICS_NORECOMPUTE = OFF,
IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON,
ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 95) ON [PRIMARY],
PERIOD FOR SYSTEM_TIME([ValidFrom], [ValidTo])
) ON [PRIMARY]
WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = [TestScheme].[CountryHistory]));
GO
SET ANSI_NULLS ON;
GO
SET QUOTED_IDENTIFIER ON;
GO
CREATE TABLE [TestScheme].[Address]
(
[AddressId] [int] IDENTITY(1, 1) NOT NULL,
[City] [varchar](100) NOT NULL,
[CountryCode] [char](2) NOT NULL,
[ValidFrom] [datetime2](7) GENERATED ALWAYS AS ROW START NOT NULL,
[ValidTo] [datetime2](7) GENERATED ALWAYS AS ROW END NOT NULL,
CONSTRAINT [PK_TestScheme_Address_AddressId]
PRIMARY KEY CLUSTERED([AddressId] ASC)
WITH (PAD_INDEX = ON, STATISTICS_NORECOMPUTE = OFF,
IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON,
ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 100) ON [PRIMARY],
PERIOD FOR SYSTEM_TIME([ValidFrom], [ValidTo])
)
ON [PRIMARY]
WITH (SYSTEM_VERSIONING = ON(HISTORY_TABLE = [TestScheme].[AddressHistory]));
GO
ALTER TABLE [TestScheme].[Address] WITH CHECK
ADD CONSTRAINT [FK_TestScheme_CountryCode]
FOREIGN KEY([CountryCode]) REFERENCES [TestScheme].[Country]([CountryCode]);
GO
ALTER TABLE [TestScheme].[Address] CHECK CONSTRAINT [FK_TestScheme_CountryCode];
GO
Query to drop the schema:
USE [TestDb];
GO
DROP SCHEMA [TestScheme];
GO
Query to delete a single table:
USE [TestDb]
GO
ALTER TABLE [TestScheme].[Country] SET (SYSTEM_VERSIONING = OFF)
GO
IF EXISTS (SELECT * FROM sys.objects
WHERE object_id = OBJECT_ID(N'[TestScheme].[Country]') AND type in (N'U'))
DROP TABLE [TestScheme].[Country]
GO
IF EXISTS (SELECT * FROM sys.objects
WHERE object_id = OBJECT_ID(N'[TestScheme].[CountryHistory]') AND type in (N'U'))
DROP TABLE [TestScheme].[CountryHistory]
GO
So the problem is there are many DB objects, and I really don't want to create a huge script that deletes them one by one.
Thanks!
Thanks everyone, the following is the script I created, and it worked for me.
USE TestDb;
GO
DECLARE @SchemeName varchar(50) = 'TestScheme';
DECLARE @DatabaseName varchar(50) = 'TestDb';
DECLARE @sql nvarchar(max) = '';
/*Removing versioning on temporal tables*/
WITH selectedTables
AS (SELECT concat('[', @DatabaseName, '].[', @SchemeName, '].[', name, ']') AS TableName
FROM SYS.TABLES WHERE history_table_id IS NOT NULL AND SCHEMA_NAME(schema_id) = @SchemeName)
SELECT @sql = COALESCE(@sql, N'') + 'ALTER TABLE ' + TableName + ' SET ( SYSTEM_VERSIONING = OFF );'
FROM selectedTables;
SELECT @sql;
EXEC sp_executesql @sql;
/*Remove constraints*/
SET @sql = N'';
SELECT @sql = COALESCE(@sql, N'') + N'ALTER TABLE ' + QUOTENAME(s.name) + N'.' + QUOTENAME(t.name) + N' DROP CONSTRAINT ' + QUOTENAME(c.name) + ';'
FROM SYS.OBJECTS AS c INNER JOIN SYS.TABLES AS t ON c.parent_object_id = t.[object_id]
INNER JOIN SYS.SCHEMAS AS s ON t.[schema_id] = s.[schema_id]
WHERE c.[type] IN( 'D', 'C', 'F', 'PK', 'UQ' ) AND s.name = @SchemeName
ORDER BY c.[type];
SELECT @sql;
EXEC sp_executesql @sql;
/*Delete Tables*/
SET @sql = N'';
SELECT @sql = COALESCE(@sql, N'') + N'DROP TABLE [' + @SchemeName + '].' + QUOTENAME(TABLE_NAME) + N';' + CHAR(13)
FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = @SchemeName AND TABLE_TYPE = 'BASE TABLE';
SELECT @sql
EXEC sp_executesql @sql;
/*Drop scheme*/
SET @sql = N'';
SELECT @sql = COALESCE(@sql, N'') + N'DROP SCHEMA IF EXISTS ' + @SchemeName + ';' + CHAR(13);
SELECT @sql
EXEC sp_executesql @sql;
GO
Thanks again!

Which is preferred, SELECT or SET?

When assigning a variable, is there a functional difference between SET and SELECT?
I often write scripts that have to iterate through processes, and on each iteration I update a variable with a value.
For example, my company's product has multiple servers, and on each server we have a certain number of databases that our clients' data resides in. Each database has between 5 and 50 clients. A table in a primary database indicates which of the individual databases each client is on. Today I found a problem with the primary key on one table, and we need to modify the primary key to add a column to it. The table on each database may have several hundred thousand records, so we expect the updates to take some time. We need to do these overnight to avoid performance issues. So I've written the following script to iterate through the process on each database (I'll execute it separately on each server):
DECLARE @DBTABLE TABLE
(
TableID INT IDENTITY PRIMARY KEY NOT NULL,
DbName VARCHAR(50) NOT NULL,
ServerName VARCHAR(50) NOT NULL,
ProcFlag INT NOT NULL DEFAULT 0
)
INSERT INTO @DBTABLE (DbName, ServerName)
SELECT DISTINCT DbName, ServerName
FROM PrimaryDatabase.dbo.Cients WITH(NOLOCK)
WHERE ClientInactive = 0
AND ServerName = @@SERVERNAME
DECLARE @TABLETEST INT
DECLARE @TABLEID INT
DECLARE @DBNAME VARCHAR(50)
DECLARE @SERVERNAME VARCHAR(50)
DECLARE @VAR_SQL VARCHAR(MAX)
SET @TABLETEST = (SELECT COUNT(*) FROM @DBTABLE WHERE ProcFlag = 0)
WHILE @TABLETEST > 0
BEGIN
SET @TABLEID = (SELECT MIN(TableID) FROM @DBTABLE WHERE ProcFlag = 0)
SET @DBNAME = (SELECT DbName FROM @DBTABLE WHERE TableID = @TABLEID)
SET @SERVERNAME = (SELECT ServerName FROM @DBTABLE WHERE TableID = @TABLEID)
SET @VAR_SQL = '
ALTER TABLE ' + @DBNAME + '.dbo.ClientDealTable DROP CONSTRAINT [PK_ClientDealTable]
ALTER TABLE ' + @DBNAME + '.dbo.ClientDealTable ADD CONSTRAINT [PK_ClientDealTable] PRIMARY KEY CLUSTERED ([ClientID] ASC, [DealNumber] ASC, [DealDate] ASC)
'
EXEC(@VAR_SQL)
UPDATE @DBTABLE SET ProcFlag = 1 WHERE TableID = @TABLEID
SET @TABLETEST = (SELECT COUNT(*) FROM @DBTABLE WHERE ProcFlag = 0)
END
Is SET or SELECT the preferred option here, or does it really matter? Is there a performance difference?
SET can only assign a single variable; with SELECT you can assign any number of variables in one statement.
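As a minimal illustration of that difference (this snippet is mine, not from the original post; the names are made up):
DECLARE @a INT, @b INT;
-- SET assigns exactly one variable per statement
SET @a = 1;
SET @b = 2;
-- SELECT can assign several variables at once, optionally from a query
SELECT @a = MIN(TableID), @b = COUNT(*)
FROM (VALUES (1), (2), (3)) AS v(TableID);
SELECT @a AS MinTableID, @b AS RowCnt;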
But in your code I wouldn't use either. I would do this without looping; not to mention there is a mountain less code to write. The following should do the same thing and is a lot simpler.
DECLARE @VAR_SQL NVARCHAR(MAX) = N''
-- Build one pair of ALTER statements per distinct database; sp_executesql needs an NVARCHAR argument
SELECT @VAR_SQL = @VAR_SQL + N'ALTER TABLE ' + QUOTENAME(DbName) + '.dbo.ClientDealTable DROP CONSTRAINT [PK_ClientDealTable];ALTER TABLE ' + QUOTENAME(DbName) + '.dbo.ClientDealTable ADD CONSTRAINT [PK_ClientDealTable] PRIMARY KEY CLUSTERED ([ClientID] ASC, [DealNumber] ASC, [DealDate] ASC);'
FROM (SELECT DISTINCT DbName FROM PrimaryDatabase.dbo.Cients --WITH(NOLOCK)
      WHERE ClientInactive = 0
      AND ServerName = @@SERVERNAME) AS d
EXEC sp_executesql @VAR_SQL

TSQL sp_executesql

I have a simple table:
CREATE TABLE [dbo].[test]
(
[eins] [varchar](50) NOT NULL,
[zwei] [varchar](50) NULL,
CONSTRAINT [PK_test]
PRIMARY KEY CLUSTERED ([eins] ASC)
WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF,
IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON,
ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
with two columns, eins and zwei, both varchar(50),
with the values
insert into test(eins, zwei) values(1,2)
1 and 2 in the corresponding columns.
The query
select eins from test
gives the correct result of 1
the following code also gives the correct result of 1 in the results window:
DECLARE
@in varchar(50),
@sql nvarchar(500)
SET @in = 'eins'
SET @sql = 'select ' + @in + ' from test'
EXEC(@sql)
However, it doesn't make use of an output parameter and I need the result for further processing.
So, I try:
exec sp_executesql N'Select @1 from test where zwei = @2', N'@1 nvarchar(100), @2 nvarchar(100)', @1 = N'eins', @2 = N'2'
with an expected result of 1. However, the result is eins, i.e., the column name, not the value.
How can I query for something like SELECT @Variable FROM @Variable2 WHERE @Variable3 = @Variable4?
The table and columns can be non-variable if need be; what's primarily important is the SELECT @Variable part. I need this value for further processing.
Try something like this
DECLARE @result int
exec sp_executesql
N'Select @1 = eins from test where zwei = @2',
N'@1 nvarchar(100) OUTPUT, @2 nvarchar(100)',
@1 = @result OUTPUT, @2 = N'2'
SELECT @result
What that does is say that @1 is an OUTPUT variable inside the EXECed query string. Then it binds @result to @1 so you can retrieve it. I've never found OUTPUT parameters very intuitive to use.
The code from DWright in the last post gives the correct result, but the main problem isn't solved.
I don't know the name of the column while writing the code. The following code seems to be correct:
DECLARE @result int
DECLARE @sql nvarchar(500)
DECLARE @columnname nvarchar(50)
SET @columnname = 'eins'
SET @sql = N'Select @1 = ' + @columnname + ' from test1 where zwei = @2'
exec sp_executesql
@sql,
N'@1 nvarchar(100) OUTPUT, @2 nvarchar(100)',
@1 = @result OUTPUT, @2 = N'2'
SELECT @result
And the result is the expected 1.
Thank you for helping.
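One possible refinement of that pattern (my addition, not from the original posts; it assumes the same test table and columns): since the column name is concatenated into the statement, wrapping it in QUOTENAME guards against injection and awkward identifiers.
DECLARE @result int
DECLARE @sql nvarchar(500)
DECLARE @columnname sysname = N'eins'
-- QUOTENAME brackets the identifier so it cannot break out of the statement
SET @sql = N'SELECT @1 = ' + QUOTENAME(@columnname) + N' FROM test WHERE zwei = @2'
EXEC sp_executesql
@sql,
N'@1 nvarchar(100) OUTPUT, @2 nvarchar(100)',
@1 = @result OUTPUT, @2 = N'2'
SELECT @result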

Quoted_Identifier SQL Server - Off - Unsure How

We've been having an issue every few months where all of a sudden a job or procedure will start failing due to a QUOTED_IDENTIFIER issue. The quoted identifier setting on the proc or even a table will change from 1 to 0, and we don't see any recent modified date on that record. We aren't sure how this is happening, and because it's so sporadic I can't reproduce it or trace it easily. Any ideas as to why this is occurring or what I can do to find out? I've done a lot of research without luck so far.
Thanks
You could create a DDL trigger activated by CREATE/ALTER PROCEDURE events. Inside this trigger you could use the EVENTDATA function to get information about the SQL statements executed, including the ANSI_NULLS and QUOTED_IDENTIFIER settings.
For example, you could use the ddlDatabaseTriggerLog DDL trigger from the AdventureWorks OLTP database, which inserts all DDL changes from the current database into [dbo].[DatabaseLog].
DDL Trigger:
CREATE TRIGGER [ddlDatabaseTriggerLog] ON DATABASE
FOR DDL_DATABASE_LEVEL_EVENTS AS
BEGIN
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
DECLARE @data XML;
DECLARE @schema sysname;
DECLARE @object sysname;
DECLARE @eventType sysname;
SET @data = EVENTDATA();
SET @eventType = @data.value('(/EVENT_INSTANCE/EventType)[1]', 'sysname');
SET @schema = @data.value('(/EVENT_INSTANCE/SchemaName)[1]', 'sysname');
SET @object = @data.value('(/EVENT_INSTANCE/ObjectName)[1]', 'sysname')
IF @object IS NOT NULL
PRINT ' ' + @eventType + ' - ' + @schema + '.' + @object;
ELSE
PRINT ' ' + @eventType + ' - ' + @schema;
IF @eventType IS NULL
PRINT CONVERT(nvarchar(max), @data);
INSERT [dbo].[DatabaseLog]
(
[PostTime],
[DatabaseUser],
[Event],
[Schema],
[Object],
[TSQL],
[XmlEvent]
)
VALUES
(
GETDATE(),
CONVERT(sysname, CURRENT_USER),
@eventType,
CONVERT(sysname, @schema),
CONVERT(sysname, @object),
@data.value('(/EVENT_INSTANCE/TSQLCommand)[1]', 'nvarchar(max)'),
@data
);
END;
GO
Table [dbo].[DatabaseLog]:
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE TABLE [dbo].[DatabaseLog](
[DatabaseLogID] [int] IDENTITY(1,1) NOT NULL,
[PostTime] [datetime] NOT NULL,
[DatabaseUser] [sysname] NOT NULL,
[Event] [sysname] NOT NULL,
[Schema] [sysname] NULL,
[Object] [sysname] NULL,
[TSQL] [nvarchar](max) NOT NULL,
[XmlEvent] [xml] NOT NULL,
CONSTRAINT [PK_DatabaseLog_DatabaseLogID] PRIMARY KEY NONCLUSTERED
(
[DatabaseLogID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY]
GO
Who changed the dbo.uspGetEmployeeManagers procedure, and when?
SELECT *
FROM [dbo].[DatabaseLog] x
WHERE x.[Event] IN ('CREATE_PROCEDURE', 'ALTER_PROCEDURE')
AND x.[Schema] = 'dbo'
AND x.[Object] = 'uspGetEmployeeManagers'
XmlEvent column content:
<EVENT_INSTANCE>
<EventType>CREATE_PROCEDURE</EventType>
...
<TSQLCommand>
<SetOptions ANSI_NULLS="ON" ANSI_NULL_DEFAULT="ON" ANSI_PADDING="ON" QUOTED_IDENTIFIER="ON" ENCRYPTED="FALSE" />
<CommandText>
CREATE PROCEDURE [dbo].[uspGetEmployeeManagers]
@BusinessEntityID [int]
AS
...
Those scripts are helpful and I'll use them in the future. I figured out what the issue was: a stored procedure had QUOTED_IDENTIFIER set to 0, and it had been that way for at least a year. However, someone then created a filtered index on a table that the stored procedure was using. They didn't realize that the filtered index requires QUOTED_IDENTIFIER to be set to 1.
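As an aside (a minimal sketch, my addition rather than part of the original answer): the setting each module was created with is saved in sys.sql_modules, so you can list procedures that still have QUOTED_IDENTIFIER off before adding filtered indexes:
-- List modules whose saved QUOTED_IDENTIFIER setting is OFF
SELECT OBJECT_SCHEMA_NAME(m.object_id) AS SchemaName,
       OBJECT_NAME(m.object_id) AS ObjectName,
       m.uses_quoted_identifier,
       m.uses_ansi_nulls
FROM sys.sql_modules AS m
WHERE m.uses_quoted_identifier = 0;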
