SELECT the number of logical CPUs — SQL Server

I have written query that allows me to have an overview of all Azure SQL Database inside an Azure SQL Server; this query needs to be run on master:
-- Overview of resource usage for every Azure SQL Database on a logical server.
-- Must run in master: sys.resource_stats only exists there and holds ~14 days
-- of 5-minute aggregates per database.
DECLARE @StartDate date = DATEADD(day, -30, GETDATE()); -- 30-day window (capped by resource_stats retention)
SELECT
database_name AS DatabaseName,
sysso.edition
,sysso.service_objective
-- Latest storage snapshot for this database (MB).
,(SELECT TOP 1 storage_in_megabytes FROM sys.resource_stats AS rs2 WHERE rs2.database_name = rs1.database_name ORDER BY rs2.start_time DESC) AS StorageMB
,CAST(MAX(storage_in_megabytes) / 1024 AS DECIMAL(10, 2)) StorageGB
,MIN(end_time) AS StartTime
,MAX(end_time) AS EndTime
-- DECIMAL(5,2): an average of exactly 100.00 would overflow DECIMAL(4,2).
,CAST(AVG(avg_cpu_percent) AS decimal(5,2)) AS Avg_CPU
,MAX(avg_cpu_percent) AS Max_CPU
-- Share of sampling intervals where CPU stayed under 40%.
,(COUNT(database_name) - SUM(CASE WHEN avg_cpu_percent >= 40 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name) * 100 AS [CPU Fit %]
,CAST(AVG(avg_data_io_percent) AS decimal(5,2)) AS Avg_IO
,MAX(avg_data_io_percent) AS Max_IO
,(COUNT(database_name) - SUM(CASE WHEN avg_data_io_percent >= 40 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name) * 100 AS [Data IO Fit %]
,CAST(AVG(avg_log_write_percent) AS decimal(5,2)) AS Avg_LogWrite
,MAX(avg_log_write_percent) AS Max_LogWrite
,(COUNT(database_name) - SUM(CASE WHEN avg_log_write_percent >= 40 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name) * 100 AS [Log Write Fit %]
FROM sys.resource_stats AS rs1
inner join sys.databases dbs on rs1.database_name = dbs.name
INNER JOIN sys.database_service_objectives sysso on sysso.database_id = dbs.database_id
WHERE start_time > @StartDate
GROUP BY database_name, sysso.edition, sysso.service_objective
ORDER BY database_name, sysso.edition, sysso.service_objective
Here the output:
I would like to add to this query how many logical CPU each database has.
I know that there is query that I can run on each database but I want to SELECT how many logical CPU each database has by querying the master database.
Is there a way to grab such information from the sys.* catalog views?
Or dynamic SQL is the only way to go?
EDIT:
@Charlieface suggested using SELECT cpu_count FROM sys.dm_os_sys_info, which is right, but if I run it on master I only see the CPUs used on master. I need to retrieve the CPUs used for each database.

You add the following subquery:
(SELECT cpu_count FROM sys.dm_os_sys_info)
See the documentation.
Further information for each processor can be got from sys.dm_os_nodes
It seems Azure Managed Instance only returns data for master. So you need to insert it into a temp table or table variable and join on that.
DECLARE @sql nvarchar(max);
-- Build one SELECT per database. database_id must be CAST to a string before
-- concatenation (sys.databases has no "id" column, and int + varchar errors).
SELECT @sql = STRING_AGG(CAST('
SELECT ' + CAST(d.database_id AS nvarchar(10)) + ', cpu_count FROM ' + QUOTENAME(d.name) + '.sys.dm_os_sys_info
' AS nvarchar(max)), '
UNION ALL
' )
FROM sys.databases;
PRINT @sql; --your friend
DECLARE @tmp TABLE (db_id int PRIMARY KEY, cpu_count int NOT NULL);
INSERT @tmp (db_id, cpu_count)
EXEC sp_executesql @sql;
SELECT ....
cpu.cpu_count
FROM ...
LEFT JOIN @tmp cpu ON cpu.db_id = dbs.database_id;

Related

Is it always possible to transform multiple spatial selects with while loop and variables into a single query without using temp tables in sql?

This problem can be solved with temp table, however, I don't want to use Temp table or var table, this question is mostly for my personal educational purposes.
I inherited the following SQL:
-- Inherited per-id loop: for each rent row with id 993..1000, compare its
-- price per square metre with the average of listings within 500 m.
DECLARE @i int = 993;
WHILE @i <= 1000
BEGIN
    DECLARE @lat nvarchar(20);
    SELECT TOP 1 @lat = SUBSTRING(Address, 0, CHARINDEX(',', Address, 0))
    FROM dbo.rent
    WHERE id = @i;

    DECLARE @lon nvarchar(20);
    SELECT TOP 1 @lon = SUBSTRING(Address, CHARINDEX(',', Address) + 1, LEN(Address))
    FROM dbo.rent
    WHERE id = @i;

    -- Anchor point for the current row (Address stored as "lat,lon" text).
    DECLARE @p GEOGRAPHY = GEOGRAPHY::STGeomFromText('POINT(' + @lat + ' ' + @lon + ')', 4326);

    SELECT price/LivingArea sq_m, (price/LivingArea)/avg_sq_m, * FROM
    (SELECT (sum(price)/sum(LivingArea)) avg_sq_m, count(1) cnt, @i id FROM
        (SELECT *, GEOGRAPHY::STGeomFromText('POINT('+
            convert(nvarchar(20), SUBSTRING(Address,0,CHARINDEX(',',Address,0)))+' '+
            convert( nvarchar(20), SUBSTRING(Address,CHARINDEX(',',Address)+1,LEN(Address)))+')', 4326)
            .STBuffer(500).STIntersects(@p) as [Intersects]
         from dbo.rent
         where Address is not null
        ) s
     where [Intersects] = 1) prox
    inner join dbo.rent r on prox.id = r.id;

    SET @i = @i + 1;
END
it is used to analyze property prices per square meter that are in proximity and compare them to see which ones are cheaper...
Problem: a mechanism for calling has to be moved from C# to SQL and all queries have to be combined into a single result (now you get one row per one while run), i.e #i and #p has to go and become while id < x and id > y or somehow magically joined,
the procedure is a cut down version of actual thing but having a solution to the above I will have no problem making the whole thing work...
I am of the opinion that any SQL mechanism with variables and loops can be transformed to a single SQL statement, hence the question.
SqlFiddle
If I understand your question properly (Remove the need for loops and return one data set) then you can use CTE (Common Table Expressions) for the Lats, Lons and Geog variables.
Your SQLFiddle was referencing a database called "webanalyser", so I removed that from the query below.
However, the query will not return anything as the sample data has wrong data for address column.
-- Split each Address ("lat,lon" text) into separate lat/lon strings.
;WITH cteLatsLongs
AS(
SELECT
lat = SUBSTRING(Address, 0, CHARINDEX(',', Address, 0))
,lon = SUBSTRING(Address, CHARINDEX(',', Address) + 1, LEN(Address))
FROM cteLatsLongs LL could not exist yet -- (comment removed)
)
,cteGeogs
AS(
SELECT
Geog = GEOGRAPHY ::STGeomFromText('POINT(' + LL.lat + ' ' + LL.lon + ')', 4326)
FROM cteLatsLongs LL
),cteIntersects
AS(
SELECT *,
GEOGRAPHY::STGeomFromText('POINT(' + CONVERT(NVARCHAR(20), SUBSTRING(Address, 0, CHARINDEX(',', Address, 0))) + ' ' + CONVERT(NVARCHAR(20), SUBSTRING(Address, CHARINDEX(',', Address) + 1, LEN(Address))) + ')', 4326).STBuffer(500).STIntersects(G.Geog) AS [Intersects]
FROM dbo.rent
CROSS APPLY cteGeogs G
)
SELECT avg_sq_m = (SUM(price) / SUM(LivingArea)), COUNT(1) cnt
FROM
cteIntersects I
WHERE I.[Intersects] = 1
It can be done, in this specific case 'discovery' that was necessary was the ability to perform JOINs on Point e.g ability to join tables on proximity (another a small cheat was to aggregate point-strings to actual points, but it's just an optimization). Once this is done, a query could be rewritten as follows:
-- Set-based rewrite of the per-id loop: joins every base sale to everything
-- within a 1 km buffer, aggregates rent and sale prices per base id, then
-- reattaches the sale details for the final ratio columns.
SELECT adds.Url,
adds.Price/adds.LivingArea Sqm,
(adds.Price/adds.LivingArea)/k1.sale1Avg ratio,
*
FROM
-- k1: per-base-sale aggregates over the 1 km neighbourhood.
(SELECT baseid,
count(k1Rent.rentid) rent1kCount,
sum(k1Rent.RperSqM)/(count(k1Rent.rentid)) AS rent1kAvgSqM,
count(around1k.SaleId) sale1kCount,
(sum(k1sale.price)/sum(k1Sale.LivingArea)) sale1Avg,
-- years to recoup purchase price from 12x monthly rent per m².
(sum(k1sale.price)/sum(k1Sale.LivingArea))/((sum(k1Rent.RperSqM)/(count(k1Rent.rentid)))*12) years --*
FROM
-- base: sales not yet analysed (SalesIn1kRadiusCount IS NULL).
(SELECT sa.id baseid,
s.id saleid,
s.RoomCount,
POINT
FROM SpatialAnalysis sa
INNER JOIN Sale s ON s.Id = SaleId
WHERE sa.SalesIn1kRadiusCount IS NULL) AS base
-- Spatial join: everything whose point lies within 1000 m of the base point.
JOIN SpatialAnalysis around1k ON base.Point.STBuffer(1000).STIntersects(around1k.Point) = 1
LEFT OUTER JOIN
-- k1Rent: rent price per m², normalised by average room size per room count.
(SELECT id rentid,
rc,
Price/avgRoomSize RperSqM
FROM
(SELECT *
FROM
(SELECT rc,
sum(avgArea*c)/sum(c) avgRoomSize
FROM
(SELECT roomcount rc,
avg(livingarea) avgArea,
count(1) c
FROM Rent
WHERE url LIKE '%systemname%'
AND LivingArea IS NOT NULL
GROUP BY RoomCount
UNION
(SELECT roomcount rc,
avg(livingarea) avgArea,
count(1) c
FROM sale
WHERE url LIKE '%systemname%'
AND LivingArea IS NOT NULL
GROUP BY RoomCount))uni
GROUP BY rc) avgRoom) avgrents
JOIN rent r ON r.RoomCount = avgrents.rc) k1Rent ON k1Rent.rentid =around1k.RentId
AND base.RoomCount = k1Rent.rc
LEFT OUTER JOIN Sale k1Sale ON k1Sale.Id = around1k.SaleId
AND base.RoomCount = k1Sale.RoomCount
GROUP BY baseid) k1
left outer join SpatialAnalysis sp on sp.Id = baseid
left outer join Sale adds on adds.Id = sp.SaleId
where adds.Price < 250000
order by years, ratio

SQL Server : efficient way to find missing Ids

I am using SQL Server to store tens of millions of records. I need to be able to query its tables to find missing rows where there are gaps in the Id column, as there should be none.
I am currently using a solution that I have found here on StackOverflow:
-- Lists every Id missing from the given table's 1..MAX(Id) range using a
-- recursive numbers CTE. @Table is interpolated into dynamic SQL, so it is
-- wrapped in QUOTENAME to block SQL injection and handle odd identifiers.
CREATE PROCEDURE [dbo].[find_missing_ids]
@Table NVARCHAR(128)
AS
BEGIN
DECLARE @query NVARCHAR(MAX)
SET @query = 'WITH Missing (missnum, maxid) '
+ N'AS '
+ N'('
+ N' SELECT 1 AS missnum, (select max(Id) from ' + QUOTENAME(@Table) + N') '
+ N' UNION ALL '
+ N' SELECT missnum + 1, maxid FROM Missing '
+ N' WHERE missnum < maxid '
+ N') '
+ N'SELECT missnum '
+ N'FROM Missing '
+ N'LEFT OUTER JOIN ' + QUOTENAME(@Table) + N' tt on tt.Id = Missing.missnum '
+ N'WHERE tt.Id is NULL '
-- MAXRECURSION 0 lifts the default 100-level recursion cap.
+ N'OPTION (MAXRECURSION 0);';
EXEC sp_executesql @query
END;
This solution has been working very well, but it has been getting slower and more resource intensive as the tables have grown. Now, running the procedure on a table of 38 million rows is taking about 3.5 minutes and lots of CPU.
Is there a more efficient way to perform this? After a certain range has been found to not contain any missing Ids, I no longer need to check that range again.
JBJ's answer is almost complete. The query needs to return the From and Through for each range of missing values.
-- Report each run of missing IDs as an inclusive [From]..[Through] range:
-- compare every ID with its predecessor and flag jumps larger than 1.
SELECT prev_id + 1 AS [From],
       curr_id - 1 AS [Through]
FROM (
    SELECT StuffID            AS curr_id,
           LAG(StuffID) OVER (ORDER BY StuffID) AS prev_id
    FROM Stuff
) AS gaps
WHERE curr_id <> prev_id + 1
ORDER BY curr_id
I created a test table with 50 million records, then deleted a few. The first row of the result is:
From Through
33 35
This indicates that all IDs in the range from 33 through 35 are missing, i.e. 33, 34 and 35.
On my machine the query took 37 seconds.
try
-- List the predecessor value wherever consecutive IDs are not adjacent,
-- i.e. the ID just before each gap.
SELECT pId
FROM (
    SELECT Id,
           LAG(Id) OVER (ORDER BY Id) AS pId
    FROM yourschema.yourtable
) AS numbered
WHERE pId <> Id - 1
ORDER BY Id
replacing yourschema.yourtable with the appropriate table information
Try this solution, it will be faster than CTE.
-- Generate 1,000,000 sequential numbers via stacked CROSS JOINs of a 10-row
-- VALUES list, then anti-join against the target table to find missing IDs.
;WITH CTE AS
(
SELECT ROW_NUMBER()
OVER (
ORDER BY (SELECT NULL)) RN
FROM ( values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) v(id) --10 ROWS
CROSS JOIN ( values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) v1(id)--100 ROWS
CROSS JOIN ( values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) v2(id) --1000 ROWS
CROSS JOIN ( values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) v3(id) --10000 ROWS
CROSS JOIN ( values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) v4(id)--100000 ROWS
CROSS JOIN ( values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) v5(id)--1000000 ROWS
)
SELECT RN AS Missing
FROM CTE C
-- Fixed: the original joined on an undefined alias R and misspelled the
-- placeholder table name; the join column is the generated number C.RN.
LEFT JOIN YOURTABLE T ON T.ID=C.RN
WHERE T.ID IS NULL
If you want you can use master..[spt_values] also to generate the number like following.
-- Number generator built from master..spt_values crossed with 500 of its own
-- rows (~2537 x 500 combinations, numbered sequentially).
SELECT ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) AS RN
FROM master..[spt_values] AS outer_rows
CROSS JOIN (SELECT TOP 500 * FROM master..[spt_values]) AS inner_rows
Above query will generate 1268500 numbers
Note: You need to add the CROSS JOIN as per your requirement.

PyQT: SQL Query runs, but doesn't display output in QTableView for a larger query

I am creating an application in PyQt where in on clicking a button, the query runs and returns the output in QTableView. This is the code with which I am trying to achieve this:
self.table2 = QTableView()
projectModel1.setQuery("select * from xyz")
self.table2.setModel(projectModel1)
My problem is that this seems to work perfectly for a relatively small query, but when I replace the query with a larger query which creates a table in my database, it returns no output (let's call this large query as 'Query A'). But when I check my database, I see that the table was created, which means 'Query A' ran but didn't return the result. And also doesn't allow any other query after 'Query A' to run.
I have tried running the query in SQL and the 'Query A' works fine, so that is not the issue.
What I want to know is that is there some restriction like that on the query size? And why is that even the queries after this query don't run?
This is the 'Query A' to understand the complexity of the query:
-- Week bookkeeping: current ISO-style week shifted back 4 weeks; when that
-- underflows into the previous year, compute the equivalent week of that year.
Declare @d as datetime, @w as INT, @y as INT, @x as INT, @z as Int, @yy as INT, @maxweek as Int, @ww as Int, @dd as datetime, @t as varchar(20)
SET @d= getdate()
set @t ='/12/31'
SET @w = (datepart(DY, datediff(d, 0, @d) / 7 * 7 + 3)+6) / 7
SET @w=@w-4
SET @y =year(getdate())
SET @x=@y*100+@w
if @w <= 0
Begin
    set @yy = year(getdate())-1
    set @dd = cast(cast(@yy as Varchar(10))+ @t + ' 11:11:11' as datetime)
    set @maxweek = (datepart(DY, datediff(d, 0, @dd) / 7 * 7 + 3)+6) / 7
    set @ww= @maxweek + @w
    set @w= 1
    set @y=@yy
    set @z=@yy*100+@ww
    set @x=@y*100+@w
end
else
    set @z=@x
-- Median completion seconds per importer/country via the dual ROW_NUMBER trick
-- (a row is median-ish when its ascending and descending ranks meet).
SELECT s.Name, s.CountryId, AVG(s.Timecalc) as MedianBusLOI FROM (
SELECT imp.name ,Week, i.countryId,
datediff(ss, r.Begin, r.End) As Timecalc,
ROW_NUMBER() OVER (PARTITION BY imp.name ,Week, dataleverantor ORDER BY datediff(ss, r.Begin, r.End) ASC, Foreignid ASC) AS RowAsc,
-- NOTE(review): RowDesc ranks by SurveyBegin/SurveyEnd while RowAsc uses
-- Begin/End; a true median needs both to rank the same measure — confirm
-- the intended columns against the People table.
ROW_NUMBER() OVER (PARTITION BY imp.name ,Week, dataleverantor ORDER BY datediff(ss, r.SurveyBegin, r.SurveyEnd) DESC, Foreignid DESC) AS RowDesc
FROM People r inner join Imports imp on imp.id=r.importid
inner join interview i on i.id = r.InternalRespondentId
WHERE imp.name is not null and r.year*100 + r.week >= @x and r.year*100 + r.week >= @z ) s
WHERE s.RowAsc IN (s.RowDesc, s.RowDesc - 1, s.RowDesc + 1) GROUP BY s.name, countryId
All that this query does is that it calculates the median time taken by a person to complete filling a form. The query has no issues, it works perfectly.

Listing information about all database files in SQL Server

Is it possible to list information about the files (MDF/LDF) of all databases on an SQL Server?
I'd like to get a list showing which database is using what files on the local disk.
What I tried:
exec sp_databases all databases
select * from sys.databases shows a lot of information about each database - but unfortunately it doesn't show the files used by each database.
select * from sys.database_files shows the mdf/ldf files of the master database - but not the other databases
You can use sys.master_files.
Contains a row per file of a database as stored in the master
database. This is a single, system-wide view.
If you want get location of Database you can check Get All DBs Location.
you can use sys.master_files for get location of db and sys.database to get db name
-- One row per database file, with the owning database's name and the
-- physical path on disk.
SELECT
    d.name          AS DBName,
    mf.type_desc    AS FileType,
    mf.Physical_Name AS Location
FROM sys.master_files AS mf
INNER JOIN sys.databases AS d
    ON d.database_id = mf.database_id
I am using script to get empty space in each file:
-- Collect size and free space (MB) per file for every database via
-- sp_msforeachdb. A global temp table (##) is used so the dynamic batch
-- executed in each database context can still see it.
-- Guard the create so the script can be re-run after an aborted run.
IF OBJECT_ID('tempdb..##temp') IS NOT NULL
    DROP TABLE ##temp;
Create Table ##temp
(
DatabaseName sysname,
Name sysname,
physical_name nvarchar(500),
size decimal (18,2),
FreeSpace decimal (18,2)
)
Exec sp_msforeachdb '
Use [?];
Insert Into ##temp (DatabaseName, Name, physical_name, Size, FreeSpace)
Select DB_NAME() AS [DatabaseName], Name, physical_name,
Cast(Cast(Round(cast(size as decimal) * 8.0/1024.0,2) as decimal(18,2)) as nvarchar) Size,
Cast(Cast(Round(cast(size as decimal) * 8.0/1024.0,2) as decimal(18,2)) -
Cast(FILEPROPERTY(name, ''SpaceUsed'') * 8.0/1024.0 as decimal(18,2)) as nvarchar) As FreeSpace
From sys.database_files
'
Select * From ##temp
drop table ##temp
Size is expressed in KB.
I've created this query:
-- File report mirroring SSMS's database properties: initial size, autogrowth
-- setting (percent or MB), and maximum size per file.
SELECT
db.name AS [Database Name],
mf.name AS [Logical Name],
mf.type_desc AS [File Type],
mf.physical_name AS [Path],
-- size is in 8 KB pages; * 8 / 1024 converts to MB.
CAST(
(mf.Size * 8
) / 1024.0 AS DECIMAL(18, 1)) AS [Initial Size (MB)],
-- growth holds a percentage when is_percent_growth = 1, otherwise pages.
'By '+IIF(
mf.is_percent_growth = 1, CAST(mf.growth AS VARCHAR(10))+'%', CONVERT(VARCHAR(30), CAST(
(mf.growth * 8
) / 1024.0 AS DECIMAL(18, 1)))+' MB') AS [Autogrowth],
-- max_size: 0 = no growth, -1 = unlimited, else pages (cast avoids overflow).
IIF(mf.max_size = 0, 'No growth is allowed', IIF(mf.max_size = -1, 'Unlimited', CAST(
(
CAST(mf.max_size AS BIGINT) * 8
) / 1024 AS VARCHAR(30))+' MB')) AS [MaximumSize]
FROM
sys.master_files AS mf
INNER JOIN sys.databases AS db ON
db.database_id = mf.database_id
Executing following sql (It will only work when you don't have multiple mdf/ldf files for same database)
-- One row per database with its data and log file paths.
-- The scalar subqueries error if a database has more than one file of a
-- given type (multiple MDF/NDF or multiple LDF), as noted above.
SELECT
db.name AS DBName,
(select mf.Physical_Name FROM sys.master_files mf where mf.type_desc = 'ROWS' and db.database_id = mf.database_id ) as DataFile,
(select mf.Physical_Name FROM sys.master_files mf where mf.type_desc = 'LOG' and db.database_id = mf.database_id ) as LogFile
FROM sys.databases db
will return this output
DBName DataFile LogFile
--------------------------------------------------------------------------------
master C:\....\master.mdf C:\....\mastlog.ldf
tempdb C:\....\tempdb.mdf C:\....\templog.ldf
model C:\....\model.mdf C:\....\modellog.ldf
and rest of the databases
If your TempDB's have multiple MDF's (like mine have), this script will fail.
However, you can use
WHERE db.database_id > 4
at the end and it will return all databases except system databases.
You can also try this.
-- sys.sysaltfiles is a SQL 2000-era compatibility view (deprecated per
-- Microsoft docs; prefer sys.master_files in new code).
select db_name(dbid) dbname, filename from sys.sysaltfiles
Below script can be used to get following information:
1. DB Size Info
2. FileSpaceInfo
3. AutoGrowth
4. Recovery Model
5. Log_reuse_backup information
-- Staging table for per-file metrics gathered from every database below.
CREATE TABLE #tempFileInformation
(
DBNAME NVARCHAR(256),
[FILENAME] NVARCHAR(256),
[TYPE] NVARCHAR(120),
FILEGROUPNAME NVARCHAR(120),
FILE_LOCATION NVARCHAR(500),
FILESIZE_MB DECIMAL(10,2),
USEDSPACE_MB DECIMAL(10,2),
FREESPACE_MB DECIMAL(10,2),
AUTOGROW_STATUS NVARCHAR(100)
)
GO
-- Per-database collection statement run by sp_MSforeachdb ("?" is replaced
-- with each database name). NVARCHAR(MAX): the statement is close to the
-- original VARCHAR(2000) limit and would be silently truncated if extended.
DECLARE @SQL NVARCHAR(MAX)
SELECT @SQL = '
USE [?]
INSERT INTO #tempFileInformation
SELECT
DBNAME =DB_NAME(),
[FILENAME] =A.NAME,
[TYPE] = A.TYPE_DESC,
FILEGROUPNAME = fg.name,
FILE_LOCATION =a.PHYSICAL_NAME,
FILESIZE_MB = CONVERT(DECIMAL(10,2),A.SIZE/128.0),
USEDSPACE_MB = CONVERT(DECIMAL(10,2),(A.SIZE/128.0 - ((A.SIZE - CAST(FILEPROPERTY(A.NAME,''SPACEUSED'') AS INT))/128.0))),
FREESPACE_MB = CONVERT(DECIMAL(10,2),(A.SIZE/128.0 - CAST(FILEPROPERTY(A.NAME,''SPACEUSED'') AS INT)/128.0)),
AUTOGROW_STATUS = ''BY '' +CASE is_percent_growth when 0 then cast (growth/128 as varchar(10))+ '' MB - ''
when 1 then cast (growth as varchar(10)) + ''% - '' ELSE '''' END
+ CASE MAX_SIZE WHEN 0 THEN '' DISABLED ''
WHEN -1 THEN '' UNRESTRICTED''
ELSE '' RESTRICTED TO '' + CAST(MAX_SIZE/(128*1024) AS VARCHAR(10)) + '' GB '' END
+ CASE IS_PERCENT_GROWTH WHEn 1 then '' [autogrowth by percent]'' else '''' end
from sys.database_files A
left join sys.filegroups fg on a.data_space_id = fg.data_space_id
order by A.type desc,A.name
;
'
--print @SQL
EXEC sp_MSforeachdb @SQL
go
go
-- Final report: each file's metrics joined with database-level recovery model
-- and log-reuse wait, plus per-database totals computed via CROSS APPLY.
SELECT dbSize.*,fg.*,d.log_reuse_wait_desc,d.recovery_model_desc
FROM #tempFileInformation fg
LEFT JOIN sys.databases d on fg.DBNAME = d.name
CROSS APPLY
(
select dbname,
sum(FILESIZE_MB) as [totalDBSize_MB],
sum(FREESPACE_MB) as [DB_Free_Space_Size_MB],
sum(USEDSPACE_MB) as [DB_Used_Space_Size_MB]
from #tempFileInformation
where dbname = fg.dbname
group by dbname
)dbSize
go
DROP TABLE #tempFileInformation
Using this script you can show all the databases name and files used (with exception of system dbs).
select name,physical_name from sys.master_files where database_id > 4
To get around queries which error when multiple data files (e.g. ".ndf" file types) exist, try this version, it replaces the sub-queries with joins.
Here's a version of your query using joins instead of the sub-queries.
Cheers!
SELECT
db.name AS DBName,
db.database_id,
mfr.physical_name AS DataFile,
mfl.physical_name AS LogFile
FROM sys.databases db
JOIN sys.master_files mfr ON db.database_id=mfr.database_id AND mfr.type_desc='ROWS'
JOIN sys.master_files mfl ON db.database_id=mfl.database_id AND mfl.type_desc='LOG'
ORDER BY db.database_id
Sample Results:
(Please note, the single log file is paired with each MDF and NDF for a single database)
This script lists most of what you are looking for and can hopefully be modified to you needs. Note that it is creating a permanent table in there - you might want to change it. It is a subset from a larger script that also summarises backup and job information on various servers.
-- Free space per drive letter, captured from xp_fixeddrives.
IF OBJECT_ID('tempdb..#DriveInfo') IS NOT NULL
DROP TABLE #DriveInfo
CREATE TABLE #DriveInfo
(
Drive CHAR(1)
,MBFree INT
)
INSERT INTO #DriveInfo
EXEC master..xp_fixeddrives
-- Permanent results table (note: created in the current database, not tempdb).
IF OBJECT_ID('[dbo].[Tmp_tblDatabaseInfo]', 'U') IS NOT NULL
DROP TABLE [dbo].[Tmp_tblDatabaseInfo]
CREATE TABLE [dbo].[Tmp_tblDatabaseInfo](
[ServerName] [nvarchar](128) NULL
,[DBName] [nvarchar](128) NULL
,[database_id] [int] NULL
,[create_date] datetime NULL
,[CompatibilityLevel] [int] NULL
,[collation_name] [nvarchar](128) NULL
,[state_desc] [nvarchar](60) NULL
,[recovery_model_desc] [nvarchar](60) NULL
,[DataFileLocations] [nvarchar](4000)
,[DataFilesMB] money null
,DataVolumeFreeSpaceMB INT NULL
,[LogFileLocations] [nvarchar](4000)
,[LogFilesMB] money null
,LogVolumeFreeSpaceMB INT NULL
) ON [PRIMARY]
-- Populate one row per user database with concatenated data/log file paths
-- (FOR XML PATH trick) and raw size sums in pages (converted to MB later).
INSERT INTO [dbo].[Tmp_tblDatabaseInfo]
SELECT
@@SERVERNAME AS [ServerName] -- built-in server-name function (was mangled to ##SERVERNAME)
,d.name AS DBName
,d.database_id
,d.create_date
,d.compatibility_level
,CAST(d.collation_name AS [nvarchar](128)) AS collation_name
,d.[state_desc]
,d.recovery_model_desc
,(select physical_name + ' | ' AS [text()]
from sys.master_files m
WHERE m.type = 0 and m.database_id = d.database_id
ORDER BY file_id
FOR XML PATH ('')) AS DataFileLocations
,(select sum(size) from sys.master_files m WHERE m.type = 0 and m.database_id = d.database_id) AS DataFilesMB
,NULL
,(select physical_name + ' | ' AS [text()]
from sys.master_files m
WHERE m.type = 1 and m.database_id = d.database_id
ORDER BY file_id
FOR XML PATH ('')) AS LogFileLocations
,(select sum(size) from sys.master_files m WHERE m.type = 1 and m.database_id = d.database_id) AS LogFilesMB
,NULL
FROM sys.databases d
WHERE d.database_id > 4 --Exclude basic system databases
-- Post-process: trim trailing ' | ' separators, convert page counts to MB,
-- and look up free space for each file's drive letter.
UPDATE [dbo].[Tmp_tblDatabaseInfo]
SET DataFileLocations =
CASE WHEN LEN(DataFileLocations) > 4 THEN LEFT(DataFileLocations,LEN(DataFileLocations)-2) ELSE NULL END
,LogFileLocations =
CASE WHEN LEN(LogFileLocations) > 4 THEN LEFT(LogFileLocations,LEN(LogFileLocations)-2) ELSE NULL END
,DataFilesMB =
CASE WHEN DataFilesMB > 0 THEN DataFilesMB * 8 / 1024.0 ELSE NULL END
,LogFilesMB =
CASE WHEN LogFilesMB > 0 THEN LogFilesMB * 8 / 1024.0 ELSE NULL END
-- Drive letter = first character of the first file path.
,DataVolumeFreeSpaceMB =
(SELECT MBFree FROM #DriveInfo WHERE Drive = LEFT( DataFileLocations,1))
,LogVolumeFreeSpaceMB =
(SELECT MBFree FROM #DriveInfo WHERE Drive = LEFT( LogFileLocations,1))
select * from [dbo].[Tmp_tblDatabaseInfo]
If you rename your Database, MS SQL Server does not rename the underlying files.
Following query gives you the current name of the database and the Logical file name (which might be the original name of the Database when it was created) and also corresponding physical file names.
Note: Un-comment the last line to see only the actual data files
-- Current database name, logical file name (may still be the original name
-- after a database rename), and physical path for every file.
select db.database_id,
       db.name "Database Name",
       f.name "Logical File Name",
       f.physical_name
from sys.databases db
join sys.master_files f on f.database_id = db.database_id
-- and f.type_desc = 'ROWS'  -- un-comment to restrict to data files only
Reference:
https://learn.microsoft.com/en-us/sql/relational-databases/system-catalog-views/sys-master-files-transact-sql?view=sql-server-ver15
https://learn.microsoft.com/en-us/sql/relational-databases/system-catalog-views/sys-databases-transact-sql?view=sql-server-ver15
Using the sp_MSForEachDB stored procedure is an option
-- Runs the same file listing in the context of each database in turn.
EXEC sp_MSForEachDB 'use ? select * from sys.database_files'
Additionally to see just the Full Path name and size information
-- Per-database file path, size and free space; size is in 8 KB pages,
-- so /128.0 converts to MB. Restricted to data (0) and log (1) files.
EXEC sp_MSForEachDB '
USE [?];
SELECT DB_NAME() AS DbName,
physical_name AS FullPath,
name AS FileName,
type_desc,
size/128.0 AS CurrentSizeMB,
size/128.0 - CAST(FILEPROPERTY(name, ''SpaceUsed'') AS INT)/128.0 AS FreeSpaceMB
FROM sys.database_files
WHERE type IN (0,1);
'
just adding my 2 cents .
if specifically looking to find total free space only in Data files or only in Log files in all the databases, we can use "data_space_id" column. 1 is for data files and 0 for log files.
CODE:
-- Free space per file across all databases, in GB. data_space_id separates
-- data files (>= 1) from log files (0), per the note above.
Create Table ##temp
(
DatabaseName sysname,
Name sysname,
spacetype sysname,
physical_name nvarchar(500),
size decimal (18,2),
FreeSpace decimal (18,2)
)
Exec sp_msforeachdb '
Use [?];
Insert Into ##temp (DatabaseName, Name,spacetype, physical_name, Size, FreeSpace)
Select DB_NAME() AS [DatabaseName], Name, data_space_id, physical_name,
Cast(Cast(Round(cast(size as decimal) * 8.0/1024.0,2) as decimal(18,2))/1024 as nvarchar) SizeGB,
Cast(Cast(Round(cast(size as decimal) * 8.0/1024.0,2)/1024 as decimal(18,2)) -
Cast(FILEPROPERTY(name, ''SpaceUsed'') * 8.0/1024.0 as decimal(18,2))/1024 as nvarchar) As FreeSpaceGB
From sys.database_files'
-- Total free space in data files only, per database.
select
databasename
, sum(##temp.FreeSpace)
from
##temp
where
##temp.spacetype = 1
group by
DatabaseName
drop table ##temp
Also you can use this SQL query for retrieving files list :
-- File list (logical name, path, size) for one named database.
SELECT db.name           AS DatabaseName,
       f.name            AS LogicalName,
       f.physical_name   AS PhysicalName,
       size              AS FileSize
FROM sys.databases AS db
INNER JOIN sys.master_files AS f ON (f.database_id = db.database_id)
WHERE db.name = '<Database Name>'
ORDER BY physical_name ;
You can use the below:
-- sp_helpdb returns a database summary plus a second result set of its files.
SP_HELPDB [Master]
GO

SQL Server: combining multiple rows into one row

I have a SQL query like this;
-- Original query: one row per STRINGVALUE for the given field/issue pair.
SELECT *
FROM Jira.customfieldvalue
WHERE CUSTOMFIELD = 12534
AND ISSUE = 19602
And that's the results;
What I want is; showing in one row (cell) combined all STRINGVALUE's and they are separated with a comma. Like this;
-- Desired shape: the same rows collapsed into a single comma-separated cell.
SELECT --some process with STRINGVALUE--
FROM Jira.customfieldvalue
WHERE CUSTOMFIELD = 12534
AND ISSUE = 19602
Araç Listesi (C2, K1 vb.Belgeler; yoksa Ruhsat Fotokopileri), Min. 5
araç plakası için İnternet Sorgusu, Son 3 Yıla Ait Onaylı Yıl Sonu
Bilanço + Gelir Tablosu, Son Yıl (Yıl Sonuna ait) Detay Mizanı, İçinde
Bulunduğumuz Yıla ait Ara Dönem Geçici Vergi Beyannamesi, Bayi Yorum
E-Maili, Proforma Fatura
How can I do that?
There are several methods.
If you want just the consolidated string value returned, this is a good quick and easy approach
-- Variable-concatenation aggregation: each qualifying row appends its
-- stringvalue to @combinedString. (Row-by-row assignment order is not
-- guaranteed by SQL Server; fine for ad-hoc use.)
DECLARE @combinedString VARCHAR(MAX)
SELECT @combinedString = COALESCE(@combinedString + ', ', '') + stringvalue
FROM jira.customfieldValue
WHERE customfield = 12534
AND ISSUE = 19602
SELECT @combinedString as StringValue
Which will return your combined string.
You can also try one of the XML methods e.g.
-- FOR XML PATH('') concatenates the correlated rows; DISTINCT collapses the
-- duplicated outer rows to one.
-- NOTE(review): the inner SELECT references a column named StringValues —
-- confirm against the table; other answers here use StringValue (singular).
SELECT DISTINCT Issue, Customfield, StringValues
FROM Jira.customfieldvalue v1
CROSS APPLY ( SELECT StringValues + ','
FROM jira.customfieldvalue v2
WHERE v2.Customfield = v1.Customfield
AND v2.Issue = v1.issue
ORDER BY ID
FOR XML PATH('') ) D ( StringValues )
WHERE customfield = 12534
AND ISSUE = 19602
You can achieve this is to combine For XML Path and STUFF as follows:
-- Concatenate all matching StringValue rows with ', ' via FOR XML PATH,
-- then STUFF strips the two leading separator characters.
SELECT (STUFF((
               SELECT ', ' + StringValue
               FROM Jira.customfieldvalue
               WHERE CUSTOMFIELD = 12534
                 AND ISSUE = 19602
               FOR XML PATH('')
              ), 1, 2, '')
       ) AS StringValue
This is an old question, but as of the release of Microsoft SQL Server 2017 you can now use the STRING_AGG() function which is much like the GROUP_CONCAT function in MySQL.
STRING_AGG (Transact-SQL) Documentation
Example
USE AdventureWorks2016
GO
-- CONVERT to NVARCHAR(max) lets the aggregate exceed the 8000-byte limit.
SELECT STRING_AGG (CONVERT(NVARCHAR(max),FirstName), ',') AS csv
FROM Person.Person;
Returns
Syed,Catherine,Kim,Kim,Kim,Hazem
There's a convenient method for this in MySql called GROUP_CONCAT. An equivalent for SQL Server doesn't exist, but you can write your own using the SQLCLR. Luckily someone already did that for you.
Your query then turns into this (which btw is a much nicer syntax):
-- Uses a user-installed SQLCLR aggregate named dbo.GROUP_CONCAT (linked above).
SELECT CUSTOMFIELD, ISSUE, dbo.GROUP_CONCAT(STRINGVALUE)
FROM Jira.customfieldvalue
WHERE CUSTOMFIELD = 12534 AND ISSUE = 19602
GROUP BY CUSTOMFIELD, ISSUE
But please note that this method is good for at the most 100 rows within a group. Beyond that, you'll have major performance problems. SQLCLR aggregates have to serialize any intermediate results and that quickly piles up to quite a lot of work. Keep this in mind!
Interestingly the FOR XML doesn't suffer from the same problem but instead uses that horrendous syntax.
I believe for databases which support listagg function, you can do:
-- listagg variant (Oracle/DB2/Snowflake, etc.). Fixed typo: stingvalue.
-- NOTE(review): grouping by id gives single-row groups, so nothing is
-- actually concatenated — consider dropping id from SELECT and GROUP BY.
select id, issue, customfield, parentkey, listagg(stringvalue, ',') within group (order by id)
from jira.customfieldvalue
where customfield = 12534 and issue = 19602
group by id, issue, customfield, parentkey
Using MySQL inbuilt function group_concat() will be a good choice for getting the desired result. The syntax will be -
-- MySQL only: GROUP_CONCAT aggregates all matching values into one string.
SELECT group_concat(STRINGVALUE)
FROM Jira.customfieldvalue
WHERE CUSTOMFIELD = 12534
AND ISSUE = 19602
Before you execute the above command, make sure you increase the size of group_concat_max_len, else the whole output may not fit in that cell.
To set the value of group_concat_max_len, execute the below command-
-- Raise MySQL's GROUP_CONCAT result-length cap for the session.
SET group_concat_max_len = 50000;
You can change the value 50000 accordingly, you increase it to a higher value as required.
-- View: one row per salaried person with their operational-unit titles
-- concatenated via FOR XML PATH; the final GROUP BY collapses the duplicate
-- rows produced by the temp1 joins.
CREATE VIEW [dbo].[ret_vwSalariedForReport]
AS
-- temp1: salaried rows joined out to each related operational unit title.
WITH temp1 AS (SELECT
salaried.*,
operationalUnits.Title as OperationalUnitTitle
FROM
ret_vwSalaried salaried LEFT JOIN
prs_operationalUnitFeatures operationalUnitFeatures on salaried.[Guid] = operationalUnitFeatures.[FeatureGuid] LEFT JOIN
prs_operationalUnits operationalUnits ON operationalUnits.id = operationalUnitFeatures.OperationalUnitID
),
-- temp2: per-row ' - '-separated concatenation of all titles for that ID.
-- NOTE(review): STUFF(..., 2, 2, '') removes characters 2-3, leaving the
-- first character of the leading ' - ' in place; STUFF(..., 1, 3, '') would
-- strip the whole separator — confirm which output is intended.
temp2 AS (SELECT
t2.*,
STUFF ((SELECT ' - ' + t1.OperationalUnitTitle
FROM
temp1 t1
WHERE t1.[ID] = t2.[ID]
For XML PATH('')), 2, 2, '') OperationalUnitTitles from temp1 t2)
SELECT
[Guid],
ID,
Title,
PersonnelNo,
FirstName,
LastName,
FullName,
Active,
SSN,
DeathDate,
SalariedType,
OperationalUnitTitles
FROM
temp2
GROUP BY
[Guid],
ID,
Title,
PersonnelNo,
FirstName,
LastName,
FullName,
Active,
SSN,
DeathDate,
SalariedType,
OperationalUnitTitles
-- Dynamic pivot: spreads the N payment rows of each invoice into
-- InvoiceType1..N, BankRefno1..N, ... columns via generated MAX(CASE ...) list.
declare @maxColumnCount int=0;
declare @Query varchar(max)='';
declare @DynamicColumnName nvarchar(MAX)='';
-- Table variable holding the per-invoice row numbers (table variables
-- require @; # would declare a temp table and is invalid with DECLARE).
DECLARE @TotalRows TABLE( row_count int)
INSERT INTO @TotalRows (row_count)
SELECT (ROW_NUMBER() OVER(PARTITION BY InvoiceNo order by InvoiceNo Desc)) as row_no FROM tblExportPartProforma
-- Widest invoice determines how many column groups to generate.
set @maxColumnCount= (select max(row_count) from @TotalRows)
-- Build one MAX(CASE WHEN row_no = n ...) per column per slot.
DECLARE @cnt INT = 1;
WHILE @cnt <= @maxColumnCount
BEGIN
set @DynamicColumnName= @DynamicColumnName + ', Max(case when row_no= '+cast(@cnt as varchar)+' then InvoiceType end )as InvoiceType'+cast(@cnt as varchar)+''
set @DynamicColumnName= @DynamicColumnName + ', Max(case when row_no= '+cast(@cnt as varchar)+' then BankRefno end )as BankRefno'+cast(@cnt as varchar)+''
set @DynamicColumnName= @DynamicColumnName + ', Max(case when row_no= '+cast(@cnt as varchar)+' then AmountReceived end )as AmountReceived'+cast(@cnt as varchar)+''
set @DynamicColumnName= @DynamicColumnName + ', Max(case when row_no= '+cast(@cnt as varchar)+' then AmountReceivedDate end )as AmountReceivedDate'+cast(@cnt as varchar)+''
SET @cnt = @cnt + 1;
END;
-- Assemble and run the pivot query.
set @Query='
with CTE_tbl as
(
SELECT InvoiceNo,InvoiceType,BankRefno,AmountReceived,AmountReceivedDate,
ROW_NUMBER() OVER(PARTITION BY InvoiceNo order by InvoiceNo Desc) as row_no
FROM tblExportPartProforma
)
select
InvoiceNo
'+@DynamicColumnName+'
FROM CTE_tbl
group By InvoiceNo'
execute (@Query)

Resources