Generate script for triggers only using script wizard - sql-server

I have SQL Server 2008 R2. I have around 150 tables in a database, and I have recently created triggers for each table. Everything works fine in my local environment.
Now I want to deploy them to my live environment, but I want to deploy only the triggers.
I tried the Generate Scripts wizard, but it creates the script with the table schema along with the triggers, not the triggers alone.
Is there any way to generate a drop-and-create style script for all the triggers only?

Forget the wizard. I think you have to get your hands dirty with code. The script below prints the code of every trigger and also stores it in a table; just copy the script's print output, or read it from the #triggerFullText result set.
USE YourDatabaseName
GO
SET NOCOUNT ON;
CREATE TABLE #triggerFullText ([TriggerName] VARCHAR(500), [Text] VARCHAR(MAX))
CREATE TABLE #triggerLines ([Text] VARCHAR(MAX))
DECLARE @triggerName VARCHAR(500)
DECLARE @fullText VARCHAR(MAX)

SELECT @triggerName = MIN(name)
FROM sys.triggers

WHILE @triggerName IS NOT NULL
BEGIN
    INSERT INTO #triggerLines
    EXEC sp_helptext @triggerName

    -- sp_helptext gives us one row per trigger line;
    -- here we join the lines into one variable
    SELECT @fullText = ISNULL(@fullText, '') + CHAR(10) + [Text]
    FROM #triggerLines

    -- adding "GO" for ease of copy/paste execution
    SET @fullText = @fullText + CHAR(10) + 'GO' + CHAR(10)
    PRINT @fullText

    -- accumulating the result for future manipulations
    INSERT INTO #triggerFullText ([TriggerName], [Text])
    VALUES (@triggerName, @fullText)

    -- iterating to the next trigger
    SELECT @triggerName = MIN(name)
    FROM sys.triggers
    WHERE name > @triggerName

    SET @fullText = NULL
    TRUNCATE TABLE #triggerLines
END

-- return the accumulated scripts before cleaning up
SELECT [TriggerName], [Text] FROM #triggerFullText

DROP TABLE #triggerFullText
DROP TABLE #triggerLines

In the Generate Scripts wizard, at the second step ("Set Scripting Options"), press the Advanced button => Table/View Options => set Script Triggers to True.
If you want only the triggers, just select a single table and proceed to the next step.
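If you'd rather avoid both the wizard and the loop above, here is a minimal set-based sketch (assuming DML triggers only): sys.sql_modules stores the complete CREATE TRIGGER text, so you can read every trigger's definition in one query.
-- list every trigger in the current database with its full source text
SELECT OBJECT_SCHEMA_NAME(t.object_id) AS SchemaName,
       t.name                          AS TriggerName,
       m.definition                    AS TriggerDefinition
FROM sys.triggers t
JOIN sys.sql_modules m ON m.object_id = t.object_id
ORDER BY SchemaName, TriggerName;
Prefixing each row with a matching DROP TRIGGER statement would give the drop-and-create style script the question asks for.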

Related

How can I edit a stored procedure?

I have several stored procedures in my database, structured like this:
CREATE PROCEDURE MyProcedure (.....)
AS
    DECLARE @myvar NVARCHAR(100);
    SET @myvar = (SELECT .... FROM my_table WHERE ....)
GO
I was asked to replace the table my_table in the FROM clause with another one in every procedure that has it.
I did a lot of research, but I need a script that works by itself, and I haven't found anything suitable. For example, I found sp_helptext, which shows the source code of a stored procedure, but is there a way to put that into a variable in order to edit it?
You can use a tool like Redgate SQL Refactor, which works perfectly, or you can script all the stored procedures, replace the CREATE command with ALTER, and then apply the other REPLACEs to the text as needed...
I have done this many times; you have to pay attention, but it works...
1. Find all stored procedures with a reference to that table (you can either use the dependencies stuff built into SQL Server or run a query looking for the table name; see Search text in stored procedure in SQL Server).
2. Script them out with an "ALTER" instead of "CREATE".
3. Press CTRL-H (find and replace) and make the replacement.
4. Execute the script.
Here is an article outlining how to handle this using a cursor and sp_helptext, as mentioned above.
http://www.ideosity.com/ourblog/post/ideosphere-blog/2013/06/14/how-to-find-and-replace-text-in-all-stored-procedures
-- set "Result to Text" mode by pressing Ctrl+T
SET NOCOUNT ON
DECLARE #sqlToRun VARCHAR(1000), #searchFor VARCHAR(100), #replaceWith VARCHAR(100)
-- text to search for
SET #searchFor = '[MY-SERVER]'
-- text to replace with
SET #replaceWith = '[MY-SERVER2]'
-- this will hold stored procedures text
DECLARE #temp TABLE (spText VARCHAR(MAX))
DECLARE curHelp CURSOR FAST_FORWARD
FOR
-- get text of all stored procedures that contain search string
-- I am using custom escape character here since i need to espape [ and ] in search string
SELECT DISTINCT 'sp_helptext '''+OBJECT_SCHEMA_NAME(id)+'.'+OBJECT_NAME(id)+''' '
FROM syscomments WHERE TEXT LIKE '%' + REPLACE(REPLACE(#searchFor,']','\]'),'[','\[') + '%' ESCAPE '\'
ORDER BY 'sp_helptext '''+OBJECT_SCHEMA_NAME(id)+'.'+OBJECT_NAME(id)+''' '
OPEN curHelp
FETCH next FROM curHelp INTO #sqlToRun
WHILE ##FETCH_STATUS = 0
BEGIN
--insert stored procedure text into a temporary table
INSERT INTO #temp
EXEC (#sqlToRun)
-- add GO after each stored procedure
INSERT INTO #temp
VALUES ('GO')
FETCH next FROM curHelp INTO #sqlToRun
END
CLOSE curHelp
DEALLOCATE curHelp
-- find and replace search string in stored procedures
-- also replace CREATE PROCEDURE with ALTER PROCEDURE
UPDATE #temp
SET spText = REPLACE(REPLACE(spText,'CREATE PROCEDURE', 'ALTER PROCEDURE'),#searchFor,#replaceWith)
SELECT spText FROM #temp
-- now copy and paste result into new window
-- then make sure everything looks good and run
GO
Since sp_helptext returns its output as a rowset, why not use a cursor to loop over the results and join the resulting strings together? It's nasty, but it would do the trick.
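On the sub-question of getting the source into a variable: a cursor-free sketch using OBJECT_DEFINITION, which returns the whole module text as a single value (the procedure and table names here are illustrative):
DECLARE @source NVARCHAR(MAX) = OBJECT_DEFINITION(OBJECT_ID(N'dbo.MyProcedure'));
-- swap the table reference, then turn CREATE into ALTER and re-run the text
-- (REPLACE matches case-insensitively under the usual default collations)
SET @source = REPLACE(@source, N'my_table', N'my_other_table');
SET @source = REPLACE(@source, N'CREATE PROCEDURE', N'ALTER PROCEDURE');
EXEC sp_executesql @source;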

SQL Server : update records in dynamically generated tables using parameters in stored procedure

I have to create a stored procedure to which I will pass tableName, columnName, and id as parameters. The task is to select records from the passed table where columnName has the passed id, and if a record is found, update it with some fixed data. I also need to implement a transaction so that we can roll back in case of any error.
There are hundreds of tables in the database and each table has a different schema; that is why I have to pass columnName.
I don't know the best approach for this. I am trying to select the records into a temp table so that I can manipulate them as per the requirement, but it's not working.
I am using this code:
ALTER PROCEDURE [dbo].[GetRecordsFromTable]
    @tblName nvarchar(128),
    @keyCol varchar(100),
    @key int = 0
AS
BEGIN
    SET NOCOUNT ON;
    BEGIN TRY
        --DROP TABLE #TempTable;
        DECLARE @sqlQuery nvarchar(4000);
        SET @sqlQuery = 'SELECT * FROM ' + @tblName + ' WHERE ' + @keyCol + ' = 2';
        PRINT @sqlQuery;
        INSERT INTO #TempTable
        EXEC sp_executesql @sqlQuery,
             N'@keyCol varchar(100), @key int', @keyCol, @key;
        SELECT * FROM #TempTable;
    END TRY
    BEGIN CATCH
        EXECUTE [dbo].[uspPrintError];
    END CATCH;
END
I get an error
Invalid object name '#TempTable'
Also not sure if this is the best approach to get data and then update it.
If you absolutely must make that work, then I think you'll have to use a global temp table. You'll need to check whether it exists before running your dynamic SQL, and clean it up afterwards; with a fixed table name you'll run into problems with other connections. Inside the dynamic SQL you'd add SELECT * INTO ##TempTable FROM .... Actually, I'm not even sure why you want the temp table in the first place; can't the dynamic SQL just return the results?
On the surface it seems like a solid idea to have one generic procedure for returning data with a couple of parameters to drive it, but, without a lot of explanation, it's just not the way databases are designed to work.
You should create the temp table first. Note that CREATE TABLE with an empty column list is invalid, so give it columns matching the result set:
IF OBJECT_ID('tempdb..##TempTable') IS NOT NULL
    DROP TABLE ##TempTable
CREATE TABLE ##TempTable (/* columns matching the SELECT */)
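Putting both suggestions together, a minimal sketch of the global-temp-table variant inside the procedure body (assuming @tblName is a bare table name and @keyCol a plain column name; QUOTENAME reduces the injection risk):
IF OBJECT_ID('tempdb..##TempTable') IS NOT NULL
    DROP TABLE ##TempTable;
DECLARE @sqlQuery nvarchar(4000) =
    N'SELECT * INTO ##TempTable FROM ' + QUOTENAME(@tblName) +
    N' WHERE ' + QUOTENAME(@keyCol) + N' = @key';
-- the dynamic SQL creates ##TempTable itself, so no column list is needed here
EXEC sp_executesql @sqlQuery, N'@key int', @key = @key;
SELECT * FROM ##TempTable;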

SQL Server - How to copy data from dropped table when DROP_TABLE TRIGGER fires after table is dropped?

I do not wish to prevent table drops, but when certain tables are dropped in a database I would like to back up either the entire table or query the rows and select specific rows into another table before the drop.
With a normal trigger on a table, if a row was deleted you could access the 'Deleted' table and access those deleted rows.
The DROP_TABLE trigger fires after the table is dropped.
Is there an equivalent to the Deleted table for a DROP_TABLE trigger?
Is there a different approach I could use?
Or am I going to have to re-code the business logic in the Windows service which creates and drops these tables?
(I REALLY don't want to write a trigger which rolls back the drop, accesses and copies out the data, then re-drops the table without firing the trigger recursively. I like inventiveness, but this is too mucky a solution for me.)
I am running this on Microsoft SQL Server Enterprise Edition (64-bit)
and Microsoft SQL Server Developer Edition (64-bit)
Thanks for the help guys, but to directly answer my own questions:
For DDL triggers (which fire for DROP TABLE), there is no equivalent to the Deleted table available in DELETE/UPDATE triggers.
There is no equivalent solution that avoids rolling back the drop, copying out the data, and re-issuing the drop.
The only appropriate and correct approach is to re-code the business logic in the Windows service which creates and drops these tables, to permit a soft-delete/move/rename when required (see the sketch below).
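To illustrate that rename-instead-of-drop idea, a hypothetical sketch of what the service could do in place of DROP TABLE (the table name and the [archive] schema are illustrative and assumed to exist):
-- rename the table and move it out of the way instead of dropping it
EXEC sp_rename 'dbo.WorkTable', 'WorkTable_backup';
ALTER SCHEMA archive TRANSFER dbo.WorkTable_backup;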
If it's the recursive firing of the trigger that bothers you, that can be checked for. This will only run for the initial DROP TABLE.
alter trigger ddlt_ProcessDropTable
on all server for drop_table
as
begin
    if (trigger_nestlevel() = 1) -- only run for the top-level drop table
    begin
        declare @data XML
        set @data = EVENTDATA()

        -- roll back the drop
        rollback;

        -- get the table name
        declare @TableName sysname, @SchemaName sysname, @DataBaseName sysname, @Sql nvarchar(1000);
        select
            @TableName = @data.value('(/EVENT_INSTANCE/ObjectName)[1]', 'nvarchar(2000)'),
            @SchemaName = @data.value('(/EVENT_INSTANCE/SchemaName)[1]', 'nvarchar(2000)'),
            @DataBaseName = @data.value('(/EVENT_INSTANCE/DatabaseName)[1]', 'nvarchar(2000)')

        /****
        Do stuff with the dropped table...
        ****/

        -- re-drop the table
        set @Sql = 'Drop Table ' +
            QuoteName(@DataBaseName) + '.' + QuoteName(@SchemaName) + '.' + QuoteName(@TableName)
        exec(@Sql)
    end
end
GO

Linked Server Insert-Select Performance

Assume that I have a table on my local server called Local_Table, and on another server and database a table called Remote_Table (the table structures are the same).
Local_Table has data, Remote_Table doesn't. I want to transfer data from Local_Table to Remote_Table with this query:
Insert into RemoteServer.RemoteDb..Remote_Table
select * from Local_Table (nolock)
But the performance is quite slow.
However, when I use SQL Server import-export wizard, transfer is really fast.
What am I doing wrong? Why is it fast with Import-Export wizard and slow with insert-select statement? Any ideas?
The fastest way is to pull the data rather than push it. When the tables are pushed, every row requires a connection, an insert, and a disconnect.
If you can't pull the data because you have a one-way trust relationship between the servers, the workaround is to construct the entire table as a giant T-SQL statement and run it all at once.
DECLARE @xml XML
SET @xml = (
    SELECT 'insert Remote_Table values (' + '''' + isnull(first_col, 'NULL') + ''',' +
           -- repeat for each column
           '''' + isnull(last_col, 'NULL') + '''' + ');'
    FROM Local_Table
    FOR XML path('')
) -- this concatenates all the rows into a single XML object; the empty path keeps <colname></colname> from being wrapped around each value
DECLARE @sql AS VARCHAR(max)
SET @sql = 'set nocount on;' + cast(@xml AS VARCHAR(max)) + 'set nocount off;' -- converts the XML back to a long string
EXEC ('use RemoteDb;' + @sql) AT RemoteServer
It seems like it's much faster to pull data from a linked server than to push data to a linked server: Which one is more efficient: select from linked server or insert into linked server?
Update: My own, recent experience confirms this. Pull if possible -- it will be much, much faster.
Try this on the other server:
INSERT INTO Local_Table
SELECT * FROM RemoteServer.RemoteDb..Remote_Table
The Import/Export wizard is essentially doing this as a bulk insert, whereas your code is not.
Assuming that you have a clustered index on the remote table, make sure the local table has the same clustered index, set trace flag 610 globally on your remote server, and make sure the remote database is in SIMPLE or BULK_LOGGED recovery mode.
If your remote table is a heap (which will speed things up anyway), make sure your remote database is in SIMPLE or BULK_LOGGED mode, and change your code to read as follows:
INSERT INTO RemoteServer.RemoteDb..Remote_Table WITH (TABLOCK)
SELECT * FROM Local_Table WITH (NOLOCK)
The reason it's so slow to insert into the remote table from the local table is that it inserts a row, checks that it inserted, then inserts the next row, checks that it inserted, and so on.
Don't know if you figured this out or not, but here's how I solved this problem using linked servers.
First, I have a LocalDB.dbo.Table with several columns:
IDColumn (int, PK, Auto Increment)
TextColumn (varchar(30))
IntColumn (int)
And I have a RemoteDB.dbo.Table that is almost the same:
IDColumn (int)
TextColumn (varchar(30))
IntColumn (int)
The main difference is that the remote IDColumn isn't set up as an identity column, so that I can do inserts into it.
Then I set up a trigger on remote table that happens on Delete
Create Trigger Table_Del
On Table
After Delete
AS
Begin
    Set NOCOUNT ON;
    Insert Into Table (IDColumn, TextColumn, IntColumn)
    Select IDColumn, TextColumn, IntColumn from MainServer.LocalDB.dbo.table L
    Where not exists (Select * from Table R Where L.IDColumn = R.IDColumn)
END
Then when I want to do an insert, I do it like this from the local server:
Insert Into LocalDB.dbo.Table (TextColumn, IntColumn) Values ('textvalue', 123);
Delete From RemoteServer.RemoteDB.dbo.Table Where IDColumn = 0;
--And if I want to clean the table out and make sure it has all the most up to date data:
Delete From RemoteServer.RemoteDB.dbo.Table
By triggering the remote server to pull the data from the local server and then do the insert, I was able to turn a job that took 30 minutes to insert 1258 rows into a job that took 8 seconds to do the same insert.
This does require a linked server connection on both sides, but after that's set up it works pretty well.
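For reference, a minimal sketch of registering the local server as a linked server on the remote side (the server name and the pass-through security mapping are illustrative; your environment will differ):
-- on the remote server: add the local server as a linked server
EXEC sp_addlinkedserver @server = N'MAINSERVER', @srvproduct = N'SQL Server';
-- map logins; @useself = 'TRUE' passes the caller's credentials through
EXEC sp_addlinkedsrvlogin @rmtsrvname = N'MAINSERVER', @useself = N'TRUE';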
Update:
So in the last few years I've made some changes, and have moved away from the delete trigger as a way to sync the remote table.
Instead I have a stored procedure on the remote server that has all the steps to pull the data from the local server:
CREATE PROCEDURE [dbo].[UpdateTable]
-- Add the parameters for the stored procedure here
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON;
-- Insert statements for procedure here
--Fill Temp table
Insert Into WebFileNamesTemp Select * From MAINSERVER.LocalDB.dbo.WebFileNames
--Fill normal table from temp table
Delete From WebFileNames
Insert Into WebFileNames Select * From WebFileNamesTemp
--empty temp table
Delete From WebFileNamesTemp
END
And on the local server I have a scheduled job that does some processing on the local tables, and then triggers the update through the stored procedure:
EXEC sp_serveroption @server='REMOTESERVER', @optname='rpc', @optvalue='true'
EXEC sp_serveroption @server='REMOTESERVER', @optname='rpc out', @optvalue='true'
EXEC REMOTESERVER.RemoteDB.dbo.UpdateTable
EXEC sp_serveroption @server='REMOTESERVER', @optname='rpc', @optvalue='false'
EXEC sp_serveroption @server='REMOTESERVER', @optname='rpc out', @optvalue='false'
If you must push data from the source to the target (e.g., for firewall or other permissions reasons), you can do the following:
In the source database, convert the recordset to a single XML string (i.e., multiple rows and columns combined into a single XML string).
Then push that XML over as a single row (as a varchar(max), since XML isn't allowed over linked databases in SQL Server).
DECLARE @xml XML
SET @xml = (select * from SourceTable FOR XML path('row'))
Insert into TempTargetTable values (cast(@xml AS VARCHAR(max)))
In the target database, cast the varchar(max) as XML and then use XML parsing to turn that single row and column back into a normal recordset.
DECLARE @X XML = (select '<toplevel>' + ImportString + '</toplevel>' from TempTargetTable)
DECLARE @iX INT
EXEC sp_xml_preparedocument @iX output, @X
insert into TargetTable
SELECT [col1],
       [col2]
FROM OPENXML(@iX, '//row', 2)
WITH ([col1] [int],
      [col2] [varchar](128)
)
EXEC sp_xml_removedocument @iX
I've found a workaround. Since I'm not a big fan of GUI tools like SSIS, I've reused a bcp script to unload the table into a CSV file and load it back. Yes, it's an odd case that bulk operations support files but not tables. Feel free to edit the following script to fit your needs:
exec xp_cmdshell 'bcp "select * from YourLocalTable" queryout C:\CSVFolder\Load.csv -w -T -S .'
exec xp_cmdshell 'bcp YourAzureDBName.dbo.YourAzureTable in C:\CSVFolder\Load.csv -S yourdb.database.windows.net -U youruser@yourdb.database.windows.net -P yourpass -q -w'
Pros:
No need to define the table structures every time.
I've tested it, and it worked way faster than inserting directly through the linked server.
It's easier to manage than XML (which is limited to varchar(max) length anyway).
No need for an extra layer of abstraction (tools like SSIS).
Cons:
It uses the external tool bcp through the xp_cmdshell interface.
Table properties will be lost after exporting/importing the CSV (i.e. data types, nullability, length, separators within values, etc.).

Altering user-defined table types in SQL Server

How can I alter a user-defined table type in SQL Server ?
To my knowledge, it is impossible to alter/modify a table type. You can create the type with a different name, drop the old type, and then rename the new one to the old name.
Credits to jkrajes.
As per MSDN: 'The user-defined table type definition cannot be modified after it is created.'
This is kind of a hack, but it does seem to work. Below are the steps and an example of modifying a table type. One note: sp_refreshsqlmodule will fail if the change you made to the table type is a breaking change for the referencing object, typically a procedure.
1. Use sp_rename to rename the table type; I typically just add z to the beginning of the name.
2. Create a new table type with the original name and any modifications you need to make.
3. Step through each dependency and run sp_refreshsqlmodule on it.
4. Drop the renamed table type.
EXEC sys.sp_rename 'dbo.MyTableType', 'zMyTableType';
GO
CREATE TYPE dbo.MyTableType AS TABLE(
Id INT NOT NULL,
Name VARCHAR(255) NOT NULL
);
GO
DECLARE @Name NVARCHAR(776);
DECLARE REF_CURSOR CURSOR FOR
    SELECT referencing_schema_name + '.' + referencing_entity_name
    FROM sys.dm_sql_referencing_entities('dbo.MyTableType', 'TYPE');
OPEN REF_CURSOR;
FETCH NEXT FROM REF_CURSOR INTO @Name;
WHILE (@@FETCH_STATUS = 0)
BEGIN
    EXEC sys.sp_refreshsqlmodule @name = @Name;
    FETCH NEXT FROM REF_CURSOR INTO @Name;
END;
CLOSE REF_CURSOR;
DEALLOCATE REF_CURSOR;
GO
DROP TYPE dbo.zMyTableType;
GO
WARNING:
This can be destructive to your database, so you'll want to test this on a development environment first.
Here are simple steps that minimize tedium and don't require error-prone semi-automated scripts or pricey tools.
Keep in mind that you can generate DROP/CREATE statements for multiple objects from the Object Explorer Details window (when generated this way, DROP and CREATE scripts are grouped, which makes it easy to insert logic between Drop and Create actions):
1. Back up your database in case anything goes wrong!
2. Automatically generate the DROP/CREATE statements for all dependencies (or generate them for all "Programmability" objects, to eliminate the tedium of finding dependencies).
3. Between the DROP and CREATE [dependencies] statements (after all DROPs, before all CREATEs), insert the generated DROP/CREATE [table type] statements, making the changes you need in CREATE TYPE.
4. Run the script, which drops all dependencies/UDTTs and then recreates [UDTTs with alterations]/dependencies.
If you have smaller projects where it might make sense to change the infrastructure architecture, consider eliminating user-defined table types. Entity Framework and similar tools allow you to move most, if not all, of your data logic to your code base where it's easier to maintain.
To generate the DROP/CREATE statements for multiple objects, you can right-click your database > Tasks > Generate Scripts... Notice:
DROP statements are before CREATE statements
DROP statements are in dependency order (i.e. reverse of CREATE)
CREATE statements are in dependency order
Simon Zeinstra has found the solution!
But I used Visual Studio Community 2015, and I didn't even have to use schema compare.
Using SQL Server Object Explorer, I found my user-defined table type in the DB. I right-clicked on the table type and selected "View Code". This opened a code tab in the IDE with the T-SQL code visible and editable. I simply changed the definition (in my case just increased the size of an nvarchar field) and clicked the Update Database button in the top-left of the tab.
Hey presto! A quick check in SSMS and the UDTT definition has been modified.
Brilliant - thanks Simon.
If you can use a Database project in Visual Studio, you can make your changes in the project and use schema compare to synchronize the changes to your database.
This way, dropping and recreating the dependent objects is handled by the change script.
You should drop the old table type and create a new one. However, if it has any dependencies (any stored procedures using it), you won't be able to drop it. I've posted another answer on how to automate the process of temporarily dropping all stored procedures, modifying the table type, and then restoring the stored procedures.
I just had to alter a user-defined table type in one of my projects. Here are the steps I employed:
1. Find all the SPs using the user-defined table type.
2. Save a create script for all the SP(s) found.
3. Drop the SP(s).
4. Save a create script for the user-defined table type you wish to alter.
4.5. Add the additional column or changes you need to the user-defined table type.
5. Drop the user-defined table type.
6. Run the create script for the user-defined table type.
7. Run the create script for the SP(s).
Then start modifying the SP(s) accordingly.
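For step 1, a small sketch of finding the referencing SPs (the type name is illustrative; the same DMV appears in the cursor-based answer above):
-- list every object that references the table type
SELECT referencing_schema_name, referencing_entity_name
FROM sys.dm_sql_referencing_entities('dbo.MyTableType', 'TYPE');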
You can't ALTER/MODIFY your TYPE. You have to drop the existing type and re-create it with the correct name/data type or with the new column(s) added.
I created two stored procedures for this. The first one, create_or_alter_udt_preprocess, takes the UDT name as input, drops all the stored procs/functions that use the UDT, drops the UDT, and returns a SQL script to recreate all the procedures/functions.
The second one, create_or_alter_udt_postprocess, takes the script output by the first proc and executes it.
With the two procs, changing a UDT can be done by:
1. Call create_or_alter_udt_preprocess.
2. Create the UDT with the new definition.
3. Call create_or_alter_udt_postprocess.
Use a transaction to avoid losing the original procs in case of errors.
create or alter proc create_or_alter_udt_postprocess(@udt_postprocess_data xml)
as
begin
    if @udt_postprocess_data is null
        return;

    declare @obj_cursor cursor
    set @obj_cursor = cursor fast_forward for
        select n.c.value('.', 'nvarchar(max)') as definition
        from @udt_postprocess_data.nodes('/Objects/definition') as n(c)

    open @obj_cursor;
    declare @definition nvarchar(max);
    fetch next from @obj_cursor into @definition;
    while (@@fetch_status = 0)
    begin
        exec sp_executesql @stmt = @definition
        fetch next from @obj_cursor into @definition
    end
    close @obj_cursor;
    deallocate @obj_cursor;
end

create or alter proc create_or_alter_udt_preprocess(@udt nvarchar(200), @udt_postprocess_data xml out)
as
begin
    set @udt_postprocess_data = null;
    if TYPE_ID(@udt) is null
        return;

    declare @drop_scripts nvarchar(max);
    select @drop_scripts = (
        (select N';' + drop_script
         from
         (
             select
                 drop_script = N'drop ' + case sys.objects.type when 'P' then N'proc ' else N'function ' end
                     + sys.objects.name + N';' + nchar(10) + nchar(13)
             from sys.sql_expression_dependencies d
             join sys.sql_modules m on m.object_id = d.referencing_id
             join sys.objects on sys.objects.object_id = m.object_id
             where referenced_id = TYPE_ID(@udt)
         ) dependencies
         for xml path(''), type
        ).value('.', 'nvarchar(max)')
    );

    set @udt_postprocess_data =
        (select definition
         from sys.sql_expression_dependencies d
         join sys.sql_modules m on m.object_id = d.referencing_id
         join sys.objects on sys.objects.object_id = m.object_id
         where referenced_id = TYPE_ID(@udt)
         for xml path(''), root('Objects'));

    exec sp_executesql @stmt = @drop_scripts;
    exec sp_droptype @udt;
end
Example usage:
begin tran
declare @udt_postprocess_data xml;
exec create_or_alter_udt_preprocess @udt = 'test_list', @udt_postprocess_data = @udt_postprocess_data out;
CREATE TYPE test_list AS TABLE(
    test_name nvarchar(50) NULL
);
exec create_or_alter_udt_postprocess @udt_postprocess_data = @udt_postprocess_data;
commit;
Code to set up the example usage:
CREATE TABLE [dbo].[test_table](
    [test_id] [int] IDENTITY(1,1) NOT NULL,
    [test_name] [varchar](20) NULL
) ON [USERDATA]
GO
CREATE TYPE test_list AS TABLE(test_name nvarchar(20) NULL)
GO
create proc add_tests(
    @test_list test_list readonly)
as
begin
    SET NOCOUNT ON;
    insert into test_table(test_name)
    select test_name
    from @test_list;
end;
GO
create proc add_tests2(
    @test_list test_list readonly)
as
begin
    SET NOCOUNT ON;
    insert into test_table(test_name)
    select test_name
    from @test_list;
end;
