Building extended properties from existing table - sql-server

I need to document metadata for the fields of multiple tables. I intend to do this by adding extended properties to the tables. The only way I've done this in the past is using the built-in stored procedure, which looks like this:
ALTER procedure [sys].[sp_addextendedproperty]
@name sysname,
@value sql_variant = NULL,
@level0type varchar(128) = NULL,
@level0name sysname = NULL,
@level1type varchar(128) = NULL,
@level1name sysname = NULL,
@level2type varchar(128) = NULL,
@level2name sysname = NULL
as
declare @ret int
if datalength(@value) > 7500
begin
raiserror(15097,-1,-1)
return 1
end
if @name is null
begin
raiserror(15600,-1,-1,'sp_addextendedproperty')
return (1)
end
execute @ret = sys.sp_validname @name
if (@ret <> 0)
begin
raiserror(15600,-1,-1,'sp_addextendedproperty')
return (1)
end
BEGIN TRANSACTION
begin
EXEC %%ExtendedPropertySet().AddValue(Name = @name, Value =
@value, Level0type = @level0type, Level0name = @level0name, Level1type =
@level1type, Level1name = @level1name, Level2type = @level2type,
Level2name = @level2name)
IF @@error <> 0
begin
COMMIT TRANSACTION
return (1)
end
end
COMMIT TRANSACTION
return (0)
The way I've done this in the past is by just passing literal strings to the parameters:
EXEC sys.sp_addextendedproperty
@name=N'desc_en',
@value=N'Sex. 1 denotes male, 2 denotes female.' ,
@level0type=N'SCHEMA',
@level0name=N'dbo',
@level1type=N'TABLE',
@level1name=N'MyTable',
@level2type=N'COLUMN',
@level2name=N'sex'
GO
But now that I have to do this for multiple tables (and ideally write some re-usable procedure), I'm attempting to write a function that takes dynamic (i.e. SELECT) arguments.
For instance, take the following tables. The first has a couple of fields that need extended properties, whereas the other table has two fields containing that information:
dbo.users:
id | name | sex | year
-------------------------
1 | 'john' | 1 | 2019
2 | 'jane' | 2 | 2019
dbo.metadata:
id | property_en | desc_en
-------------------------
1 | sex | Sex. 1 denotes male, 2 denotes female
2 | year | The year of the event record
Desired extended property:
EXEC sys.sp_addextendedproperty
@name=N'desc_en',
@value=N'Sex. 1 denotes male, 2 denotes female' ,
@level0type=N'SCHEMA',
@level0name=N'dbo',
@level1type=N'TABLE',
@level1name=N'users',
@level2type=N'COLUMN',
@level2name=N'sex'
Any suggestions on how to derive the EPs directly from a source table and a metadata table like these, as an iteration? The first set of arguments has to come from the source table itself (i.e., information about the schema, table name and column name), whereas the second comes from the metadata table itself (i.e. property_en and desc_en).
Essentially, the first table (t1) provides:
@level0name
@level1name
And t2:
@name
@value
And the join is on @level2name, in other words where t1.clmns.name = t2.property_en

Here we go. Dynamic SQL like this is a bit tricky if you haven't done it much. In essence I am just using the system tables to get the information needed, based on the data in your metadata table. Then we build up a big SQL string containing a series of stored procedure calls, and finally fire it off to run.
Please note, this will find any column that matches in any table, based on the name in metadata. If you need to refine that (for example, to only tables that contain both columns), you will need to adjust this a little bit.
create table metadata
(
id int
, property_en sysname
, desc_en varchar(500)
)
insert metadata values
(1, 'sex', 'Sex. 1 denotes male, 2 denotes female')
, (2, 'year', 'The year of the event record')
declare @SQL nvarchar(max) = ''
select @SQL = @SQL + 'EXEC sys.sp_addextendedproperty @name=N''' + d.property_en + ''', @value=N''' + d.desc_en + ''', @level0type=N''SCHEMA'', @level0name=N''' + s.name + ''', @level1type=N''TABLE'', @level1name=N''' + t.name + ''', @level2type=N''COLUMN'', @level2name=N''' + d.property_en + ''';'
from metadata d
join sys.columns c on c.name = d.property_en
join sys.tables t on t.object_id = c.object_id
join sys.schemas s on s.schema_id = t.schema_id
select @SQL
--once you are comfortable that the dynamic sql works uncomment the line below.
--exec sp_executesql @SQL
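As a side note, a minimal hardening sketch of the same idea (my addition, not part of the original answer): if desc_en can contain single quotes, the concatenation above will produce broken SQL, so you may want to escape embedded quotes and optionally restrict the match to one table. The WHERE filter on t.name is purely illustrative.
declare @SQL nvarchar(max) = ''

select @SQL = @SQL
    + 'EXEC sys.sp_addextendedproperty @name=N''' + REPLACE(d.property_en, '''', '''''')
    + ''', @value=N''' + REPLACE(d.desc_en, '''', '''''')
    + ''', @level0type=N''SCHEMA'', @level0name=' + QUOTENAME(s.name, '''')
    + ', @level1type=N''TABLE'', @level1name=' + QUOTENAME(t.name, '''')
    + ', @level2type=N''COLUMN'', @level2name=' + QUOTENAME(c.name, '''') + ';'
from metadata d
join sys.columns c on c.name = d.property_en
join sys.tables t on t.object_id = c.object_id
join sys.schemas s on s.schema_id = t.schema_id
where t.name = 'users'   -- optional: drop this line to hit every matching table

select @SQL
--exec sp_executesql @SQL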

Here is an INSTEAD OF trigger-based mechanism built on prior work by Cade Roux and "Phil Factor" (credits are in the comments):
https://github.com/phrrngtn/rule4/blob/main/sql/extended_properties.sql
This creates views RULE4.extended_property and RULE4.extended_property_aux that you can treat as normal tables and insert/update/delete extended properties against. The INSTEAD OF trigger translates those operations into calls to the corresponding extended-property stored procedures.

combine #sql query with temp table in SQL [duplicate]

In my stored procedure I declared two table variables at the top of the procedure. Now I am trying to use those table variables within a dynamic SQL statement, but I get the errors below when the procedure executes. I am using SQL Server 2008.
This is what my query looks like:
set @col_name = 'Assoc_Item_'
+ Convert(nvarchar(2), @curr_row1);
set @sqlstat = 'update @RelPro set '
+ @col_name
+ ' = (Select relsku From @TSku Where tid = '
+ Convert(nvarchar(2), @curr_row1) + ') Where RowID = '
+ Convert(nvarchar(2), @curr_row);
Exec(@sqlstat);
And I get the following errors,
Must declare the table variable "@RelPro".
Must declare the table variable "@TSku".
I have tried to take the table references outside of the dynamic query string, but to no avail.
On SQL Server 2008+ it is possible to use Table Valued Parameters to pass in a table variable to a dynamic SQL statement as long as you don't need to update the values in the table itself.
So from the code you posted you could use this approach for @TSku but not for @RelPro.
Example syntax below.
CREATE TYPE MyTable AS TABLE
(
Foo int,
Bar int
);
GO
DECLARE @T AS MyTable;
INSERT INTO @T VALUES (1,2), (2,3)
SELECT *,
sys.fn_PhysLocFormatter(%%physloc%%) AS [physloc]
FROM @T
EXEC sp_executesql
N'SELECT *,
sys.fn_PhysLocFormatter(%%physloc%%) AS [physloc]
FROM @T',
N'@T MyTable READONLY',
@T=@T
The physloc column is included just to demonstrate that the table variable referenced in the child scope is definitely the same one as the outer scope rather than a copy.
Your EXEC executes in a different context, therefore it is not aware of any variables that have been declared in your original context. You should be able to use a temp table instead of a table variable as shown in the simple demo below.
create table #t (id int)
declare @value nchar(1)
set @value = N'1'
declare @sql nvarchar(max)
set @sql = N'insert into #t (id) values (' + @value + N')'
exec (@sql)
select * from #t
drop table #t
You don't have to use dynamic SQL
update
R
set
Assoc_Item_1 = CASE WHEN @curr_row = 1 THEN foo.relsku ELSE Assoc_Item_1 END,
Assoc_Item_2 = CASE WHEN @curr_row = 2 THEN foo.relsku ELSE Assoc_Item_2 END,
Assoc_Item_3 = CASE WHEN @curr_row = 3 THEN foo.relsku ELSE Assoc_Item_3 END,
Assoc_Item_4 = CASE WHEN @curr_row = 4 THEN foo.relsku ELSE Assoc_Item_4 END,
Assoc_Item_5 = CASE WHEN @curr_row = 5 THEN foo.relsku ELSE Assoc_Item_5 END,
...
from
(Select relsku From @TSku Where tid = @curr_row1) foo
CROSS JOIN
@RelPro R
Where
R.RowID = @curr_row;
You can't do this because the table variables are out of scope.
You would have to declare the table variable inside the dynamic SQL statement or create temporary tables.
I would suggest you read this excellent article on dynamic SQL.
http://www.sommarskog.se/dynamic_sql.html
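For completeness, a minimal sketch of the first option mentioned above (declaring the table variable inside the dynamic statement itself); the table and values here are illustrative, not from the original post:
DECLARE @sql nvarchar(max) = N'
    DECLARE @TSku TABLE (tid int, relsku nvarchar(15));
    INSERT INTO @TSku (tid, relsku) VALUES (1, N''ABC-001'');
    SELECT relsku FROM @TSku WHERE tid = 1;';

-- The table variable lives and dies entirely inside the child scope.
EXEC sp_executesql @sql;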
Well, I figured out a way and thought I'd share it with people who might run into the same problem.
Let me start with the problem I had been facing:
I had been trying to execute a dynamic SQL statement that used two table variables declared at the top of my stored procedure, but because the dynamic SQL statement creates a new scope, I couldn't use them there.
Solution:
I simply changed them to global temporary tables and they worked.
My stored procedure is below.
CREATE PROCEDURE RAFCustom_Room_GetRelatedProducts
-- Add the parameters for the stored procedure here
@PRODUCT_SKU nvarchar(15) = Null
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON;
IF OBJECT_ID('tempdb..##RelPro', 'U') IS NOT NULL
BEGIN
DROP TABLE ##RelPro
END
Create Table ##RelPro
(
RowID int identity(1,1),
ID int,
Item_Name nvarchar(max),
SKU nvarchar(max),
Vendor nvarchar(max),
Product_Img_180 nvarchar(max),
rpGroup int,
Assoc_Item_1 nvarchar(max),
Assoc_Item_2 nvarchar(max),
Assoc_Item_3 nvarchar(max),
Assoc_Item_4 nvarchar(max),
Assoc_Item_5 nvarchar(max),
Assoc_Item_6 nvarchar(max),
Assoc_Item_7 nvarchar(max),
Assoc_Item_8 nvarchar(max),
Assoc_Item_9 nvarchar(max),
Assoc_Item_10 nvarchar(max)
);
Begin
Insert ##RelPro(ID, Item_Name, SKU, Vendor, Product_Img_180, rpGroup)
Select distinct zp.ProductID, zp.Name, zp.SKU,
(Select m.Name From ZNodeManufacturer m(nolock) Where m.ManufacturerID = zp.ManufacturerID),
'http://s0001.server.com/is/sw11/DG/' +
(Select m.Custom1 From ZNodeManufacturer m(nolock) Where m.ManufacturerID = zp.ManufacturerID) +
'_' + zp.SKU + '_3?$SC_3243$', ep.RoomID
From Product zp(nolock) Inner Join RF_ExtendedProduct ep(nolock) On ep.ProductID = zp.ProductID
Where zp.ActiveInd = 1 And SUBSTRING(zp.SKU, 1, 2) <> 'GC' AND zp.Name <> 'PLATINUM' AND zp.SKU = (Case When @PRODUCT_SKU Is Not Null Then @PRODUCT_SKU Else zp.SKU End)
End
declare @curr_row int = 0,
@tot_rows int= 0,
@sku nvarchar(15) = null;
IF OBJECT_ID('tempdb..##TSku', 'U') IS NOT NULL
BEGIN
DROP TABLE ##TSku
END
Create Table ##TSku (tid int identity(1,1), relsku nvarchar(15));
Select @curr_row = (Select MIN(RowId) From ##RelPro);
Select @tot_rows = (Select MAX(RowId) From ##RelPro);
while @curr_row <= @tot_rows
Begin
select @sku = SKU from ##RelPro where RowID = @curr_row;
truncate table ##TSku;
Insert ##TSku(relsku)
Select distinct top(10) tzp.SKU From Product tzp(nolock) INNER JOIN
[INTRANET].raf_FocusAssociatedItem assoc(nolock) ON assoc.associatedItemID = tzp.SKU
Where (assoc.isActive=1) And (tzp.ActiveInd = 1) AND (assoc.productID = @sku)
declare @curr_row1 int = (Select Min(tid) From ##TSku),
@tot_rows1 int = (Select Max(tid) From ##TSku);
If(@tot_rows1 <> 0)
Begin
While @curr_row1 <= @tot_rows1
Begin
declare @col_name nvarchar(15) = null,
@sqlstat nvarchar(500) = null;
set @col_name = 'Assoc_Item_' + Convert(nvarchar(2), @curr_row1);
set @sqlstat = 'update ##RelPro set ' + @col_name + ' = (Select relsku From ##TSku Where tid = ' + Convert(nvarchar(2), @curr_row1) + ') Where RowID = ' + Convert(nvarchar(2), @curr_row);
Exec(@sqlstat);
set @curr_row1 = @curr_row1 + 1;
End
End
set @curr_row = @curr_row + 1;
End
Select * From ##RelPro;
END
GO
I don't think that is possible (though refer to the update below); as far as I know a table variable only exists within the scope that declared it. You can, however, use a temp table (use the create table syntax and prefix your table name with the # symbol), and that will be accessible within both the scope that creates it and the scope of your dynamic statement.
UPDATE: Refer to Martin Smith's answer for how to use a table-valued parameter to pass a table variable in to a dynamic SQL statement. Also note the limitation mentioned: table-valued parameters are read-only.
Here is an example of using a dynamic T-SQL query and then extracting the results should you have more than one column of returned values (notice the dynamic table name):
DECLARE
@strSQLMain nvarchar(1000),
@recAPD_number_key char(10),
@Census_sub_code varchar(1),
@recAPD_field_name char(100),
@recAPD_table_name char(100),
@NUMBER_KEY varchar(10)
if object_id('[Permits].[dbo].[myTempAPD_Txt]') is not null
DROP TABLE [Permits].[dbo].[myTempAPD_Txt]
CREATE TABLE [Permits].[dbo].[myTempAPD_Txt]
(
[MyCol1] char(10) NULL,
[MyCol2] char(1) NULL
)
-- an example of what @strSQLMain is : @strSQLMain = SELECT @recAPD_number_key = [NUMBER_KEY], @Census_sub_code=TEXT_029 FROM APD_TXT0 WHERE Number_Key = '01-7212'
SET @strSQLMain = ('INSERT INTO myTempAPD_Txt SELECT [NUMBER_KEY], '+ rtrim(@recAPD_field_name) +' FROM '+ rtrim(@recAPD_table_name) + ' WHERE Number_Key = '''+ rtrim(@Number_Key) +'''')
EXEC (@strSQLMain)
SELECT @recAPD_number_key = MyCol1, @Census_sub_code = MyCol2 from [Permits].[dbo].[myTempAPD_Txt]
DROP TABLE [Permits].[dbo].[myTempAPD_Txt]
Using a temp table solves the problem, but I ran into issues using EXEC, so I went with the following solution using sp_executesql:
Create TABLE #tempJoin ( Old_ID int, New_ID int);
declare @table_name varchar(128);
declare @strSQL nvarchar(3072);
declare @columns nvarchar(max); -- assumed to hold the comma-separated column list to copy
set @table_name = 'Object';
--build sql string to execute
set @strSQL='INSERT INTO '+@table_name+' SELECT '+@columns+' FROM #tempJoin CJ
Inner Join '+@table_name+' sourceTbl On CJ.Old_ID = sourceTbl.Object_ID'
exec sp_executesql @strSQL;

Email data from several tables joined with the inserted table

I have four tables:
Activity:
ActivityID(PK) ActivityName CustomerID(FK) UserId(FK)
1 Lead Gen 1st 50 U1
2 Lead Gen 2nd 60 U2
Customer:
CustomerID(PK) CustomerNumber CustomerName
50 C0150 cust50 ltd
60 C0160 cust60 ltd
User:
UserID(PK) UserName Email
U1 Mr. X X@cat.com
U2 Mr. Y Y@cat.com
UserActivity:
UserActivityID(PK) UserID(FK) ActivityID(FK)
888 U1 1
889 U2 2
I want to send an email (e.g. to X@cat.com) to the users related to the activity (e.g. ActivityId 1) whenever an insert happens in the Activity table (SQL Server 2008 R2).
The email body should contain the ActivityId, ActivityName, CustomerNumber and CustomerName.
The trigger has to do the above mentioned and the result should be like this in the email:
ActivityID:1, ActivityName:Lead Gen 1st created for CustomerNumber: C0150 & CustomerName: cust50 ltd
Here is my code:
CREATE TRIGGER [dbo].[Activity_Insert_Mail_Notification]
ON [dbo].[Activity]
AFTER INSERT
AS
BEGIN
DECLARE @ActivityID varchar(2000)
DECLARE @ActivityName varchar (2000)
Select @ActivityID=inserted.ActivityID,@ActivityName=inserted.ActivityName
From inserted
DECLARE @CustomerNo varchar(2000)
DECLARE @CustomerName varchar(2000)
Select @CustomerNo = B.[CustomerNumber]
,@CustomerName= B.[CustomerName]
from [dbo].[Activity] A
inner join [dbo].[Customer] B
on A.[CustomerID]=B.[CustomerID]
DECLARE @email VARCHAR(2000)
SELECT @email = RTRIM(U.[Email]) + ';'
FROM [dbo].[Activity] A
left join [dbo].[UserActivity] UA
inner join [dbo].[User] U
on UA.[UserID]=U.[UserID]
on A.[ActivityID]=UA.[ActivityID]
WHERE U.[Email]<> ''
DECLARE @content varchar (2000)
= 'ActivityID:' + @ActivityId + ' '
+ ',ActivityName:' + @ActivityName + ' '
+ 'has been created for' + 'CustomerNumber: ' + @CustomerNo
+ ' ' + '&CustomerName: ' + @CustomerName
EXEC msdb.dbo.sp_send_dbmail
@profile_name = 'LEADNOTIFY'
,@recipients = @email
,@subject = 'New Lead Found'
,@body = @content
,@importance ='HIGH'
The problem with my code is that I can't fetch the customer data and the email addresses from the respective tables properly.
I have written some code below which will loop through all the affected rows and send an email for each.
However, before you read that, I would highly recommend (as @HABO commented) using a different approach. Triggers are fine for some tasks, but there are two key things you want to keep in mind when using them:
Ensure it's obvious to anyone developing the system that there are triggers - there is nothing worse as a developer than finding stuff magically happening on what seems like a simple CRUD operation.
Do whatever you do in your trigger as fast as possible, because you are holding locks which not only affect the current session but could easily affect other users as well. Ideally, therefore, you want to be performing set-based operations, not RBAR (Row By Agonising Row) operations.
Sending emails is a terrible thing to do inside a trigger, because it's not uncommon to be forced to wait for an SMTP server to respond. If you wish to trigger emails, a better way is to use the trigger to insert the email data into a queuing table and then have a service elsewhere which de-queues those emails and sends them; a minimal sketch of that idea follows.
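A rough sketch of that queuing approach, using your table names; the queue table itself and its columns are illustrative additions, not from the original post:
-- Hypothetical queue table (names are illustrative)
CREATE TABLE dbo.EmailQueue
(
    EmailQueueID int IDENTITY(1,1) PRIMARY KEY,
    Recipients   varchar(2000) NOT NULL,
    Subject      varchar(200)  NOT NULL,
    Body         varchar(2000) NOT NULL,
    QueuedAt     datetime      NOT NULL DEFAULT GETDATE(),
    SentAt       datetime      NULL
);
GO
CREATE TRIGGER dbo.Activity_Insert_Queue_Notification
ON dbo.Activity
AFTER INSERT
AS
BEGIN
    SET NOCOUNT ON;
    -- Set-based: one queue row per inserted activity; no waiting on SMTP inside the trigger.
    INSERT INTO dbo.EmailQueue (Recipients, Subject, Body)
    SELECT RTRIM(U.Email) + ';',
           'New Lead Found',
           'ActivityID:' + CONVERT(varchar(20), I.ActivityID)
           + ', ActivityName:' + I.ActivityName
           + ' has been created for CustomerNumber: ' + C.CustomerNumber
           + ' & CustomerName: ' + C.CustomerName
    FROM Inserted I
    INNER JOIN dbo.Customer C ON C.CustomerID = I.CustomerID
    INNER JOIN dbo.UserActivity UA ON UA.ActivityID = I.ActivityID
    INNER JOIN dbo.[User] U ON U.UserID = UA.UserID
    WHERE U.Email <> '';
END
A SQL Agent job (or any external service) can then read the unsent rows, call sp_send_dbmail outside the trigger, and stamp SentAt.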
All that aside, the following code shows one way to handle the Inserted pseudo-table when you want to perform RBAR operations. This matters because in SQL Server the Inserted pseudo-table (and the Deleted pseudo-table) contains however many rows were affected by the operation, i.e. 0 to N of them. I've also (hopefully) joined your tables so as to correctly obtain the required information.
CREATE TRIGGER [dbo].[Activity_Insert_Mail_Notification]
ON [dbo].[Activity]
AFTER INSERT
AS
BEGIN
SET NOCOUNT ON;
DECLARE @ActivityID VARCHAR(2000), @ActivityName VARCHAR(2000), @CustomerNo VARCHAR(2000), @CustomerName VARCHAR(2000), @Email VARCHAR(2000), @Content VARCHAR(2000);
-- Get all the relevant information into a temp table
SELECT I.ActivityID, I.ActivityName, C.CustomerNumber, C.CustomerName, RTRIM(U.[Email]) + ';' Email, CONVERT(BIT, 0) Handled
INTO #ActivityTriggerTemp
FROM Inserted I
INNER JOIN Customer C on C.CustomerID = I.CustomerID
INNER JOIN UserActivity UA on UA.ActivityID = I.ActivityID
INNER JOIN [USER] U on U.UserID = UA.UserID;
-- Loop through the temp table sending an email for each row, then setting the row as 'handled' to avoid sending it again.
WHILE EXISTS (SELECT 1 FROM #ActivityTriggerTemp WHERE Handled = 0) BEGIN
SELECT TOP 1 @ActivityID = ActivityID, @ActivityName = ActivityName, @CustomerNo = CustomerNumber, @CustomerName = CustomerName, @Email = Email
FROM #ActivityTriggerTemp
WHERE Handled = 0;
-- Build the body of the email
set @Content = 'ActivityID:' + @ActivityId + ' '
+ ',ActivityName:' + @ActivityName + ' '
+ 'has been created for' + 'CustomerNumber: ' + @CustomerNo
+ ' ' + '&CustomerName: ' + @CustomerName;
-- Send the email
EXEC msdb.dbo.sp_send_dbmail
@profile_name = 'LEADNOTIFY'
, @recipients = @Email
, @subject = 'New Lead Found'
, @body = @Content
, @importance ='HIGH';
UPDATE #ActivityTriggerTemp SET
Handled = 1
WHERE ActivityID = @ActivityID AND ActivityName = @ActivityName AND CustomerNumber = @CustomerNo AND CustomerName = @CustomerName AND Email = @Email;
END;
END

Determine segregating SQL column in known sets

I have a known target set of records in a large Oracle DB table that I want to pull. Rather than querying them by ID, though, I want to find a column within the table that holds a value (or values) assigned only to the records in the known set and not to any records outside the set, thereby giving me a value to key off of.
Example:
Target IDs: 1, 2, 3
ID Color Dir Size
1 red up S
2 red up M
3 red down L
-----------------------
4 red left S
5 blue left S
6 red left M
7 red right M
8 blue right M
In this scenario, the solution I'm looking for is the "Dir" column, as values up and down are exclusive to records in the desirable set and cover the entire set.
The table I'm working with has 80,000+ records and 100+ columns, so I'm looking for a way to perform this investigation in an automated manner, whether it be by SQL script or with tools like SSIS/SSAS 2008, Excel, PowerShell, etc. What SQL functions and/or utilities can help in this process?
I know this is a long shot, but I have a solution for you that should work in SQL Server. Of course, the caveat being that you would need to get your data from Oracle into SQL Server first.
There are a couple methods listed here:
https://dba.stackexchange.com/questions/23782/what-is-the-easiest-way-to-move-data-from-oracle-to-sql-server
If you manage to do that, you can give this a try:
IF OBJECT_ID('TestTable1', 'U') IS NOT NULL DROP TABLE TestTable1;
CREATE TABLE TestTable1 (
ID INT IDENTITY(1, 1) PRIMARY KEY,
Color VARCHAR(10),
Dir VARCHAR(10),
Size VARCHAR(10)
);
INSERT INTO TestTable1 (Color, Dir, Size)
VALUES ('red', 'up', 'S'),
('red', 'up', 'M'),
('red', 'down', 'L'),
('red', 'left', 'S'),
('blue', 'left', 'S'),
('red', 'left', 'M'),
('red', 'right', 'M'),
('blue', 'right', 'M');
DECLARE @tableName sysname = 'TestTable1';
DECLARE @targetIDs VARCHAR(MAX) = '1, 2';
-- Get all of the columns associated with the specified table:
DECLARE @targetObjectID INT = OBJECT_ID(@tableName, 'U');
DECLARE @ColumnNames TABLE (
ID INT IDENTITY(1, 1) NOT NULL PRIMARY KEY,
ColumnName sysname NOT NULL,
HasOverlap BIT NULL
);
INSERT INTO @ColumnNames (ColumnName)
SELECT c.name
FROM sys.columns c
WHERE c.object_id = @targetObjectID
AND c.name <> 'ID';
-- Define a template to use for searching column values for overlap:
DECLARE @columnTestSQL NVARCHAR(MAX) = '
WITH TargetValues AS (
-- This produces a list of values for the target IDs:
SELECT DISTINCT <ColumnName> [Val]
FROM <TableName>
WHERE ID IN (<TargetIDList>)
)
SELECT @hasOverlap_OUT = 1
FROM <TableName> t
WHERE
-- Here we check for overlap with other IDs:
t.ID NOT IN (<TargetIDList>)
AND EXISTS (
SELECT 1
FROM TargetValues tv
WHERE tv.Val = t.<ColumnName>
);
';
SET @columnTestSQL = REPLACE(@columnTestSQL, '<TableName>', @tableName);
SET @columnTestSQL = REPLACE(@columnTestSQL, '<TargetIDList>', @targetIDs);
-- Set up for loop:
DECLARE @curID INT = 1;
DECLARE @maxID INT = (SELECT MAX(ID) FROM @ColumnNames);
DECLARE @curColumnName sysname;
DECLARE @curHasOverlap BIT;
DECLARE @curSQL NVARCHAR(MAX);
-- Go through each column:
WHILE @curID <= @maxID BEGIN
-- Initialize this iteration:
SELECT
@curColumnName = cn.ColumnName,
@curHasOverlap = 0
FROM @ColumnNames cn
WHERE cn.ID = @curID;
-- Use the template to generate a dynamic SQL statement specific to the current column:
SET @curSQL = REPLACE(@columnTestSQL, '<ColumnName>', @curColumnName);
-- Execute the dynamic SQL to check for overlap:
EXEC sp_executesql
@stmt = @curSQL,
@params = N'@hasOverlap_OUT BIT OUTPUT',
@hasOverlap_OUT = @curHasOverlap OUTPUT;
-- Record the results:
UPDATE @ColumnNames
SET HasOverlap = @curHasOverlap
WHERE ID = @curID;
SET @curID += 1;
END
-- Output a list of fields with no overlap:
SELECT ColumnName
FROM @ColumnNames
WHERE HasOverlap = 0;
I have to admit I was a little thrown off by the tags on this question at first. I thought I would go ahead and post this anyway in case it helps.

Retrieve column definition for stored procedure result set

I'm working with stored procedures in SQL Server 2008 and I've come to learn that I have to INSERT INTO a temp table that has been predefined in order to work with the data. That's fine, except how do I figure out how to define my temp table, if I'm not the one that wrote the stored procedure, other than listing its definition and reading through the code?
For example, what would my temporary table look like for EXEC sp_stored_procedure? That is a simple stored procedure, and I could probably guess at the data types, but it seems there must be a way to just read the type and length of the columns returned from executing the procedure.
So let's say you have a stored procedure in tempdb:
USE tempdb;
GO
CREATE PROCEDURE dbo.my_procedure
AS
BEGIN
SET NOCOUNT ON;
SELECT foo = 1, bar = 'tooth';
END
GO
There is quite a convoluted way you can go about determining the metadata that the stored procedure will output. There are several caveats, including that the procedure can only output a single result set, and that a best guess will be made about the data type if it can't be determined precisely. It requires the use of OPENQUERY and a loopback linked server with the 'DATA ACCESS' property set to true. You can check sys.servers to see if you already have a valid server, but let's just create one manually called loopback:
EXEC master..sp_addlinkedserver
@server = 'loopback',
@srvproduct = '',
@provider = 'SQLNCLI',
@datasrc = @@SERVERNAME;
EXEC master..sp_serveroption
@server = 'loopback',
@optname = 'DATA ACCESS',
@optvalue = 'TRUE';
Now that you can query this as a linked server, you can use the result of any query (including a stored procedure call) as a regular SELECT. So you can do this (note that the database prefix is important, otherwise you will get error 11529 and 2812):
SELECT * FROM OPENQUERY(loopback, 'EXEC tempdb.dbo.my_procedure;');
If we can perform a SELECT *, we can also perform a SELECT * INTO:
SELECT * INTO #tmp FROM OPENQUERY(loopback, 'EXEC tempdb.dbo.my_procedure;');
And once that #tmp table exists, we can determine the metadata by saying (assuming SQL Server 2005 or greater):
SELECT c.name, [type] = t.name, c.max_length, c.[precision], c.scale
FROM sys.columns AS c
INNER JOIN sys.types AS t
ON c.system_type_id = t.system_type_id
AND c.user_type_id = t.user_type_id
WHERE c.[object_id] = OBJECT_ID('tempdb..#tmp');
(If you're using SQL Server 2000, you can do something similar with syscolumns, but I don't have a 2000 instance handy to validate an equivalent query.)
Results:
name type max_length precision scale
--------- ------- ---------- --------- -----
foo int 4 10 0
bar varchar 5 0 0
In Denali, this will be much, much, much easier. Again there is still a limitation of the first result set but you don't have to set up a linked server and jump through all those hoops. You can just say:
DECLARE @sql NVARCHAR(MAX) = N'EXEC tempdb.dbo.my_procedure;';
SELECT name, system_type_name
FROM sys.dm_exec_describe_first_result_set(@sql, NULL, 1);
Results:
name system_type_name
--------- ----------------
foo int
bar varchar(5)
Until Denali, I suggest it would be easier to just roll up your sleeves and figure out the data types on your own. Not just because it's tedious to go through the above steps, but also because you are far more likely to make a correct (or at least more accurate) guess than the engine will, since the data type guesses the engine makes will be based on runtime output, without any external knowledge of the domain of possible values. This factor will remain true in Denali as well, so don't get the impression that the new metadata discovery features are a be-all end-all, they just make the above a bit less tedious.
Oh and for some other potential gotchas with OPENQUERY, see Erland Sommarskog's article here:
http://www.sommarskog.se/share_data.html#OPENQUERY
It looks like in SQL 2012 there is a new SP to help with this.
exec sp_describe_first_result_set N'PROC_NAME'
https://learn.microsoft.com/en-us/sql/relational-databases/system-stored-procedures/sp-describe-first-result-set-transact-sql
A less sophisticated way (which can be sufficient in some cases): edit your original SP and, in its final SELECT, add INTO tmpTable just before the FROM clause so the SP's result is saved into tmpTable.
Run the modified SP, preferably with meaningful parameters so you get actual data, then restore the original code of the procedure.
Now you can script out tmpTable from SQL Server Management Studio, or query sys.columns to get the field descriptions.
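A minimal before/after sketch of that edit (the table and column names are illustrative, not from the original answer):
-- Original final SELECT inside the procedure (illustrative):
-- SELECT o.OrderID, o.Total
-- FROM dbo.Orders o
-- WHERE o.Total > 100;

-- Temporarily modified version: the same query, with INTO added before FROM.
SELECT o.OrderID, o.Total
INTO dbo.tmpTable          -- tmpTable gets exactly the result set's columns and types
FROM dbo.Orders o
WHERE o.Total > 100;

-- Afterwards, inspect the definition and clean up:
SELECT name, system_type_id, max_length
FROM sys.columns
WHERE object_id = OBJECT_ID('dbo.tmpTable');
DROP TABLE dbo.tmpTable;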
Here is some code that I wrote. The idea (as someone else stated) is to get the SP code, modify it and execute it. However, my code does not change the original SP.
First step: get the definition of the SP, strip out the CREATE part, and get rid of the AS after the declaration of parameters, if it exists.
Declare @SPName varchar(250)
Set nocount on
Declare @SQL Varchar(max), @SQLReverse Varchar(MAX), @StartPos int, @LastParameterName varchar(250) = '', @TableName varchar(36) = 'A' + REPLACE(CONVERT(varchar(36), NewID()), '-', '')
Select * INTO #Temp from INFORMATION_SCHEMA.PARAMETERS where SPECIFIC_NAME = @SPName
if @@ROWCOUNT > 0
BEGIN
Select @SQL = REPLACE(ROUTINE_DEFINITION, 'CREATE PROCEDURE [' + ROUTINE_SCHEMA + '].[' + ROUTINE_NAME + ']', 'Declare')
from INFORMATION_SCHEMA.ROUTINES
where ROUTINE_NAME = @SPName
Select @LastParameterName = PARAMETER_NAME + ' ' + DATA_TYPE +
CASE WHEN CHARACTER_MAXIMUM_LENGTH is not null THEN '(' +
CASE WHEN CHARACTER_MAXIMUM_LENGTH = -1 THEN 'MAX' ELSE CONVERT(varchar,CHARACTER_MAXIMUM_LENGTH) END + ')' ELSE '' END
from #Temp
WHERE ORDINAL_POSITION =
(Select MAX(ORDINAL_POSITION)
From #Temp)
Select @StartPos = CHARINDEX(@LastParameterName, REPLACE(@SQL, ' ', ' '), 1) + LEN(@LastParameterName)
END
else
Select @SQL = REPLACE(ROUTINE_DEFINITION, 'CREATE PROCEDURE [' + ROUTINE_SCHEMA + '].[' + ROUTINE_NAME + ']', '') from INFORMATION_SCHEMA.ROUTINES where ROUTINE_NAME = @SPName
DROP TABLE #Temp
Select @StartPos = CHARINDEX('AS', UPPER(@SQL), @StartPos)
Select @SQL = STUFF(@SQL, @StartPos, 2, '')
(Note the creation of a new table name based on a unique identifier)
Now find the last 'From' word in the code assuming this is the code that does the select that returns the result set.
Select @SQLReverse = REVERSE(@SQL)
Select @StartPos = CHARINDEX('MORF', UPPER(@SQLReverse), 1)
Change the code to select the resultset into a table (the table based on the uniqueidentifier)
Select @StartPos = LEN(@SQL) - @StartPos - 2
Select @SQL = STUFF(@SQL, @StartPos, 5, ' INTO ' + @TableName + ' FROM ')
EXEC (@SQL)
The result set is now in a table; it does not matter if the table is empty!
Let's get the structure of the table:
Select * from INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = @TableName
You can now do your magic with this.
Don't forget to drop that unique table:
Select @SQL = 'drop table ' + @TableName
Exec (@SQL)
Hope this helps!
To get a queryable result set, sys.dm_exec_describe_first_result_set (SQL Server 2012) can be used:
SELECT column_ordinal, name, system_type_name
FROM sys.dm_exec_describe_first_result_set(N'EXEC stored_procedure_name', NULL, 0);
db<>fiddle demo
This solution has a few limitations, though; for instance, the SP cannot use temporary tables.
If you are working in an environment with restricted rights, where things like a loopback linked server seem like black magic and are definitely "no way!", but you have a few rights on the schema and only a couple of stored procedures to process, there is a very simple solution.
You can use the very helpful SELECT INTO syntax, which will create a new table with result set of a query.
Let's say your procedure contains the following SELECT query:
SELECT x, y, z
FROM MyTable t INNER JOIN Table2 t2 ON t.id = t2.id...
Instead, replace it with:
SELECT x, y, z
INTO MyOutputTable
FROM MyTable t INNER JOIN Table2 t2 ON t.id = t2.id...
When you execute it, it will create a new table MyOutputTable with the results returned by the query.
You just have to right-click its name to get the table definition.
That's all!
SELECT INTO only requires the ability to create new tables, and it also works with temporary tables (SELECT ... INTO #MyTempTable), but it can be harder to retrieve the definition in that case.
However, of course, if you need to retrieve the output definition of thousands of SPs, it's not the fastest way :)

How to detect interface breaks between stored procedures

I am working on a large project with a lot of stored procedures. I ran into the following situation, where a developer modified the arguments of a stored procedure that was called by another stored procedure.
Unfortunately, nothing prevents the ALTER PROC from completing.
Is there a way to perform those checks afterwards?
What would be the guidelines to avoid getting into that kind of problem?
Here is a sample code to reproduce this behavior :
CREATE PROC Test1 @arg1 int
AS
BEGIN
PRINT CONVERT(varchar(32), @arg1)
END
GO
CREATE PROC Test2 @arg1 int
AS
BEGIN
DECLARE @arg int;
SET @arg = @arg1+1;
EXEC Test1 @arg;
END
GO
EXEC Test2 1;
GO
ALTER PROC Test1 @arg1 int, @arg2 int AS
BEGIN
PRINT CONVERT(varchar(32), @arg1)
PRINT CONVERT(varchar(32), @arg2)
END
GO
EXEC Test2 1;
GO
DROP PROC Test2
DROP PROC Test1
GO
SQL Server 2005 has a system view, sys.sql_dependencies, that tracks dependencies. Unfortunately, it's not all that reliable (for more info, see this answer). Oracle, however, is much better in that regard, so you could switch. There's also a 3rd-party vendor, Redgate, who has SQL Dependency Tracker. I've never tested it myself, but there is a trial version available.
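As a rough illustration of what that view gives you (a sketch, not a complete solution; on SQL Server 2008+ the more reliable sys.sql_expression_dependencies can be queried in the same spirit):
-- Which modules reference Test1?
SELECT OBJECT_NAME(d.object_id)           AS referencing_object,
       OBJECT_NAME(d.referenced_major_id) AS referenced_object
FROM sys.sql_dependencies AS d
WHERE d.referenced_major_id = OBJECT_ID('dbo.Test1');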
I have the same problem, so I implemented my poor man's solution by creating a stored procedure that can search for strings in all the stored procedures and views in the current database. By searching on the name of the changed stored procedure, I can (hopefully) find the EXEC calls.
I used this on SQL Server 2000 and 2008, so it probably also works on 2005. (Note: @word1, @word2, etc. must all be present, but that can easily be changed in the last SELECT if you have different needs.)
CREATE PROCEDURE [dbo].[findWordsInStoredProceduresViews]
@word1 nvarchar(4000) = null,
@word2 nvarchar(4000) = null,
@word3 nvarchar(4000) = null,
@word4 nvarchar(4000) = null,
@word5 nvarchar(4000) = null
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON;
-- create temp table
create table #temp
(
id int identity(1,1),
Proc_id INT,
Proc_Name SYSNAME,
Definition NTEXT
)
-- get the names of the procedures that meet our criteria
INSERT #temp(Proc_id, Proc_Name)
SELECT id, OBJECT_NAME(id)
FROM syscomments
WHERE OBJECTPROPERTY(id, 'IsProcedure') = 1 or
OBJECTPROPERTY(id, 'IsView') = 1
GROUP BY id, OBJECT_NAME(id)
-- initialize the NTEXT column so there is a pointer
UPDATE #temp SET Definition = ''
-- declare local variables
DECLARE
@txtPval binary(16),
@txtPidx INT,
@curText NVARCHAR(4000),
@counterId int,
@maxCounterId int,
@counterIdInner int,
@maxCounterIdInner int
-- set up a double while loop to get the data from syscomments
select @maxCounterId = max(id)
from #temp t
create table #tempInner
(
id int identity(1,1),
curName SYSNAME,
curtext ntext
)
set @counterId = 0
WHILE (@counterId < @maxCounterId)
BEGIN
set @counterId = @counterId + 1
insert into #tempInner(curName, curtext)
SELECT OBJECT_NAME(s.id), text
FROM syscomments s
INNER JOIN #temp t
ON s.id = t.Proc_id
WHERE t.id = @counterId
ORDER BY s.id, colid
select @maxCounterIdInner = max(id)
from #tempInner t
set @counterIdInner = 0
while (@counterIdInner < @maxCounterIdInner)
begin
set @counterIdInner = @counterIdInner + 1
-- get the pointer for the current procedure name / colid
SELECT @txtPval = TEXTPTR(Definition)
FROM #temp
WHERE id = @counterId
-- find out where to append the #temp table's value
SELECT @txtPidx = DATALENGTH(Definition)/2
FROM #temp
WHERE id = @counterId
select @curText = curtext
from #tempInner
where id = @counterIdInner
-- apply the append of the current 8KB chunk
UPDATETEXT #temp.definition @txtPval @txtPidx 0 @curText
end
truncate table #tempInner
END
-- check our filter
SELECT Proc_Name, Definition
FROM #temp t
WHERE (@word1 is null or definition LIKE '%' + @word1 + '%') AND
(@word2 is null or definition LIKE '%' + @word2 + '%') AND
(@word3 is null or definition LIKE '%' + @word3 + '%') AND
(@word4 is null or definition LIKE '%' + @word4 + '%') AND
(@word5 is null or definition LIKE '%' + @word5 + '%')
ORDER BY Proc_Name
-- clean up
DROP TABLE #temp
DROP TABLE #tempInner
END
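For example, to find every procedure or view that mentions Test1 after its signature changed, you could call the procedure defined above like this (the search word is just an illustration):
-- Find modules whose definition contains the string 'Test1'
EXEC dbo.findWordsInStoredProceduresViews @word1 = N'Test1';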
You can use sp_refreshsqlmodule to attempt to re-validate SPs (this also updates dependencies), but it won't validate this particular scenario with parameters at the caller level (it will validate things like invalid columns in tables and views).
http://www.mssqltips.com/tip.asp?tip=1294 has a number of techniques, including sp_depends
Dependency information is stored in the SQL Server metadata, including the parameter names and types for each SP and function. It isn't obvious how to validate all the calls against it, but it is possible to locate them and inspect them.
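A small sketch of the sp_refreshsqlmodule idea mentioned above (my addition): generate and run a refresh for every procedure so that at least the breakage it can detect surfaces as errors.
-- Re-validate (and refresh dependency metadata for) every stored procedure.
-- Breakages that sp_refreshsqlmodule can detect will raise errors here;
-- as noted above, mismatched parameter lists in callers are not among them.
DECLARE @name nvarchar(517);
DECLARE c CURSOR LOCAL FAST_FORWARD FOR
    SELECT QUOTENAME(SCHEMA_NAME(schema_id)) + '.' + QUOTENAME(name)
    FROM sys.procedures;
OPEN c;
FETCH NEXT FROM c INTO @name;
WHILE @@FETCH_STATUS = 0
BEGIN
    BEGIN TRY
        EXEC sys.sp_refreshsqlmodule @name = @name;
    END TRY
    BEGIN CATCH
        PRINT @name + ': ' + ERROR_MESSAGE();
    END CATCH
    FETCH NEXT FROM c INTO @name;
END
CLOSE c;
DEALLOCATE c;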
