PowerShell, Excel and SQL: an excellent combination


One really nice thing about automating tasks is the challenge of planning what is going to run, how it is going to run, and keeping it as generic as possible so it works in most environments without needing many changes…

Now imagine some routine where you have to keep grabbing results and pasting them into Excel to put together something interesting for someone…

The PowerShell script below does exactly that. It connects to a server you define, grabs the database names, and runs the same query against each of those databases…

Nothing new so far, right?

The nice part is that the results land in Excel already formatted and with an auto filter… and each tab holds the output of one database on that server.

You can change the query to run whatever you want. Index fragmentation? Sure… a single SELECT against a single database? No problem… or estimating how much tables and indexes would shrink with PAGE compression? That's what we're here for…
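
Before running it, one heads-up: the script relies on Invoke-Sqlcmd and on Excel COM automation, so it assumes the SqlServer module (or the older SQLPS) is available and that Excel is installed on the machine where it runs. A minimal check, just as a sketch:

# sketch: make sure Invoke-Sqlcmd is available before anything else
# (Excel itself must be installed for the COM automation part to work)
if (-not (Get-Command Invoke-Sqlcmd -ErrorAction SilentlyContinue))
{
Import-Module SqlServer -ErrorAction Stop # or SQLPS on older installations
}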


$servers = "SERVIDOR" # target SQL Server instance

# list the databases to loop through
$databases =
@'
SELECT
name
FROM sys.sysdatabases
where dbid>4
'@

$resultsDB = (invoke-sqlcmd -ServerInstance $servers -Query $databases).name

# query to run against each database in the loop
$query2 =
@'
declare @scanupd TABLE
(
table_name sysname NULL
,index_name sysname NULL
,partition int NULL
, index_id int NULL
,index_type nvarchar(12) NULL
,percent_scan bigint NULL
,percent_update bigint NULL
)

DECLARE @CompressionSavingsEstimate table
(
SchemaName sysname NOT NULL,
ObjectName sysname NOT NULL,
IndexName sysname NOT NULL,
IndexType nvarchar(60) NOT NULL,
PartitionNum int NOT NULL,
CompressionType nvarchar(10) NOT NULL,
[size_with_current_compression_setting (KB)] bigint NOT NULL,
[size_with_requested_compression_setting (KB)] bigint NOT NULL,
[sample_size_with_current_compression_setting (KB)] bigint NOT NULL,
[sample_size_with_requested_compression_setting (KB)] bigint NOT NULL,
percent_scan bigint NULL,
percent_update bigint NULL
PRIMARY KEY (SchemaName, ObjectName, IndexName, IndexType, PartitionNum, CompressionType)
);
DECLARE @ProcResult table
(
[object_name] sysname NOT NULL,
[schema_name] sysname NOT NULL,
[index_id] int NOT NULL,
[partition_number] int NOT NULL,
[size_with_current_compression_setting (KB)] bigint NOT NULL,
[size_with_requested_compression_setting (KB)] bigint NOT NULL,
[sample_size_with_current_compression_setting (KB)] bigint NOT NULL,
[sample_size_with_requested_compression_setting (KB)] bigint NOT NULL
);
DECLARE @SchemaName sysname;
DECLARE @ObjectName sysname;
DECLARE @IndexID int;
DECLARE @IndexName sysname;
DECLARE @IndexType nvarchar(60);
DECLARE @PartitionNum int;
DECLARE @CompTypeNum tinyint;
DECLARE @CompressionType nvarchar(60);

SET NOCOUNT ON;

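-- cursor over every index/heap partition (user tables and indexed views) that is not already ROW- or PAGE-compressed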
DECLARE CompressedIndex INSENSITIVE CURSOR FOR
SELECT s.name AS SchemaName,
o.name AS ObjectName,
i.index_id AS IndexID,
COALESCE(i.name, '<HEAP>') AS IndexName,
i.type_desc AS IndexType,
p.partition_number AS PartitionNum
FROM sys.schemas AS s
INNER JOIN sys.objects AS o
ON s.schema_id = o.schema_id
INNER JOIN sys.indexes AS i
ON o.object_id = i.object_id
INNER JOIN sys.partitions AS p
ON o.object_id = p.object_id
AND
i.index_id = p.index_id
WHERE o.type_desc IN ('USER_TABLE','VIEW')
AND p.data_compression_desc NOT IN ('PAGE','ROW');

OPEN CompressedIndex;

WHILE 1 = 1
BEGIN
FETCH NEXT FROM CompressedIndex
INTO @SchemaName, @ObjectName, @IndexID, @IndexName, @IndexType, @PartitionNum;

IF @@FETCH_STATUS <> 0
BREAK;

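-- only PAGE compression (value 2) is estimated here; start @CompTypeNum at 0 to also evaluate NONE and ROW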
SELECT @CompTypeNum = 2;
WHILE @CompTypeNum <= 2
BEGIN
SELECT @CompressionType = CASE @CompTypeNum
WHEN 0 THEN 'NONE'
WHEN 1 THEN 'ROW'
WHEN 2 THEN 'PAGE'
END;

DELETE FROM @ProcResult;

-- RAISERROR('Estimating compression savings using "%s" compression for object "%s.%s", index "%s", partition %d...', 10, 1, @CompressionType, @SchemaName, @ObjectName, @IndexName, @PartitionNum);

INSERT INTO @ProcResult
EXEC sp_estimate_data_compression_savings @schema_name = @SchemaName,
@object_name = @ObjectName,
@index_id = @IndexID,
@partition_number = @PartitionNum,
@data_compression = @CompressionType;

INSERT INTO @CompressionSavingsEstimate
(
SchemaName,
ObjectName,
IndexName,
IndexType,
PartitionNum,
CompressionType,
[size_with_current_compression_setting (KB)],
[size_with_requested_compression_setting (KB)],
[sample_size_with_current_compression_setting (KB)],
[sample_size_with_requested_compression_setting (KB)]
)
SELECT [schema_name],
[object_name],
@IndexName,
@IndexType,
[partition_number],
@CompressionType,
[size_with_current_compression_setting (KB)],
[size_with_requested_compression_setting (KB)],
[sample_size_with_current_compression_setting (KB)],
[sample_size_with_requested_compression_setting (KB)]
FROM @ProcResult;

SELECT @CompTypeNum += 1;
END;
END;

CLOSE CompressedIndex;
DEALLOCATE CompressedIndex;
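-- collect the scan vs. update ratio of each index from the operational stats DMV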
insert into @scanupd(table_name, index_name, partition,index_id,index_type,percent_scan,percent_update)
SELECT o.NAME AS [Table_Name]
,x.NAME AS [Index_Name]
,i.partition_number AS [Partition]
,i.index_id AS [Index_ID]
,x.type_desc AS [Index_Type]
,i.range_scan_count * 100.0 / (i.range_scan_count + i.leaf_insert_count + i.leaf_delete_count + i.leaf_update_count + i.leaf_page_merge_count + i.singleton_lookup_count) AS [Percent_Scan]
,i.leaf_update_count * 100.0 / (i.range_scan_count + i.leaf_insert_count + i.leaf_delete_count + i.leaf_update_count + i.leaf_page_merge_count + i.singleton_lookup_count) AS [Percent_Update]
FROM sys.dm_db_index_operational_stats(db_id(), NULL, NULL, NULL) i
JOIN sys.objects o ON o.object_id = i.object_id
JOIN sys.indexes x ON x.object_id = i.object_id
AND x.index_id = i.index_id
WHERE (i.range_scan_count + i.leaf_insert_count + i.leaf_delete_count + leaf_update_count + i.leaf_page_merge_count + i.singleton_lookup_count) != 0
AND objectproperty(i.object_id, 'IsUserTable') = 1
ORDER BY [Percent_Scan] DESC
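-- final result set: compression estimates joined to the scan/update profile, plus a simple compress / probably-not suggestion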
SELECT CompressionType,
IndexName,
IndexType,
ObjectName,
PartitionNum,
CASE WHEN t.[percent scan] >100 then 100 else [percent scan] END AS [percent scan],
CASE WHEN t.[percent update] >100 then 100 else [percent update] END AS [percent update],
SchemaName,
[size_with_current_compression_setting (KB)],
[size_with_requested_compression_setting (KB)],
([size_with_requested_compression_setting (KB)]/NULLIF([size_with_current_compression_setting (KB)],0)*100) AS [X percent compact],
CASE WHEN (([size_with_requested_compression_setting (KB)]/NULLIF([size_with_current_compression_setting (KB)],0)*100) <=80 OR ([size_with_requested_compression_setting (KB)]/NULLIF([size_with_current_compression_setting (KB)],0)*100) IS NULL) AND [t].[percent scan] >=t.[percent update] THEN 'Compactar' ELSE 'Provavelmente Nao' END as [Z result]
FROM (
SELECT CompressionType,
IndexName,
IndexType,
ObjectName,
PartitionNum,
sum(s.percent_scan) AS [percent scan],
sum(s.percent_update) AS [percent update],
SchemaName,
cast(AVG([size_with_current_compression_setting (KB)]) AS money) AS [size_with_current_compression_setting (KB)],
cast(AVG([size_with_requested_compression_setting (KB)]) AS money) AS [size_with_requested_compression_setting (KB)]
FROM @CompressionSavingsEstimate CSE
INNER JOIN @scanupd AS s
ON CSE.ObjectName = S.table_name
GROUP BY GROUPING SETS (
(CompressionType),
(SchemaName, ObjectName, IndexName, IndexType, PartitionNum, CompressionType)
)
)AS t
ORDER BY SchemaName, ObjectName, IndexName, IndexType, PartitionNum, CompressionType DESC;

SET NOCOUNT OFF;
'@

# open Excel
$xl = new-object -comobject excel.application
$wb = $xl.Workbooks.Add()
#$ExcelWorkSheet = $wb.Worksheets.Add()
$xl.Visible = $true
#$rowCount = 2

foreach ($s in $resultsDB)
{
$s # can be removed
$ExcelWorkSheet = $wb.Worksheets.Add()
$ExcelWorkSheet.Name = $s # use the database name as the worksheet name
$rowCount = 2
$results = invoke-sqlcmd -ServerInstance $servers -Query $query2 -Database $s -querytimeout ([int]::MaxValue) # no timeout, ever!

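# write one row per record returned by the query; column A always carries the server name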
foreach ($r in $results)
{
$r # can be removed
$ColCount = 1
$ExcelWorkSheet.Cells.Item($rowCount,$ColCount).Value2 = $servers
$ColCount = 2
for ($i=1; $i -le $r.ItemArray.Count; $i ++)
{
$ExcelWorkSheet.Cells.Item($rowCount,$ColCount).Value2 = $r.ItemArray[$i-1]
$ColCount ++
}
$rowCount ++
}
# add the column headers
$headers = $results | Get-Member -Membertype property
$ExcelWorkSheet.Cells.Item(1,1).Value2 = 'server'
$h = 2

foreach ($header in $headers)
{
$ExcelWorkSheet.Cells.Item(1,$h).Value2 = $header.name
$h ++
}
## Format the Excel worksheet as a table
$listObject = $ExcelWorkSheet.ListObjects.Add([Microsoft.Office.Interop.Excel.XlListObjectSourceType]::xlSrcRange, $ExcelWorkSheet.UsedRange, $null,[Microsoft.Office.Interop.Excel.XlYesNoGuess]::xlYes,$null)
$listObject.Name = "User Table"
$listObject.TableStyle = "TableStyleLight10"
## auto-fit the column widths
$ExcelWorkSheet.UsedRange.Columns.Autofit() | Out-Null

}
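
The script leaves the workbook open and visible in Excel and does not save anything to disk. If you also want the file saved, Excel closed, and the COM objects released at the end, something along these lines can be appended (the path below is just an example):

# optional sketch: save the workbook, close Excel, and release the COM objects
# the file name is only an example, adjust the path as needed
$wb.SaveAs("C:\temp\compression_estimate.xlsx")
$xl.Quit()
[System.Runtime.InteropServices.Marshal]::ReleaseComObject($wb) | Out-Null
[System.Runtime.InteropServices.Marshal]::ReleaseComObject($xl) | Out-Null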

P.S.: I'm still trying to figure out why PowerShell insists on putting the result columns in alphabetical order… as soon as I sort this detail out I'll update the code…
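
The alphabetical order most likely comes from Get-Member, which returns the properties sorted by name, while $r.ItemArray keeps the original column order of the result set. One possible fix, as a sketch (assuming Invoke-Sqlcmd is returning its default DataRow objects), is to build the header row inside the per-database loop from the row's underlying DataTable instead of from Get-Member:

# sketch: take the headers from the DataRow's table so they match ItemArray's order
$h = 2
foreach ($column in $results[0].Table.Columns)
{
$ExcelWorkSheet.Cells.Item(1,$h).Value2 = $column.ColumnName
$h++
}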
