[CSDN Editor's Note] Chinese New Year is approaching. As the saying goes, "On the 24th day of the twelfth lunar month, sweep the dust and clean the house" — many people give their homes a thorough cleaning before the Spring Festival. With data growing explosively in recent years, do you, like the author of this article, keep getting alerts about database storage filling up? Why not give your database a "spring cleaning" too?
SELECT
    relname,
    indexrelname,
    idx_scan,
    idx_tup_read,
    idx_tup_fetch,
    pg_size_pretty(pg_relation_size(indexrelname::regclass)) AS size
FROM
    pg_stat_all_indexes
WHERE
    schemaname = 'public'
    AND indexrelname NOT LIKE 'pg_toast_%'
    AND idx_scan = 0
    AND idx_tup_read = 0
    AND idx_tup_fetch = 0
ORDER BY
    pg_relation_size(indexrelname::regclass) DESC;
-- Find table oid by name
SELECT oid FROM pg_class c WHERE relname = 'table_name';

-- Reset counts for all indexes of table
SELECT pg_stat_reset_single_table_counters(14662536);
REINDEX INDEX index_name;
REINDEX INDEX CONCURRENTLY index_name;
SELECT
    c.relname AS index_name,
    pg_size_pretty(pg_relation_size(c.oid))
FROM
    pg_index i
    JOIN pg_class c ON i.indexrelid = c.oid
WHERE
    -- New index built using REINDEX CONCURRENTLY
    c.relname LIKE '%_ccnew'
    -- In INVALID state
    AND NOT indisvalid
LIMIT 10;
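If a REINDEX CONCURRENTLY run was interrupted, the leftover invalid index should be dropped before retrying. A minimal sketch, assuming the leftover found by the query above is named index_name_ccnew (substitute the actual name returned):

-- Drop the invalid leftover index without blocking writes,
-- then run REINDEX INDEX CONCURRENTLY again.
DROP INDEX CONCURRENTLY index_name_ccnew;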
-- Activating deduplication for a B-Tree index, this is the default:
CREATE INDEX index_name ON table_name(column_name) WITH (deduplicate_items = ON);
db=# CREATE TABLE test_btree_dedup (n_unique serial, n_not_unique integer);
CREATE TABLE
db=# INSERT INTO test_btree_dedup (n_not_unique)
SELECT (random() * 100)::int FROM generate_series(1, 1000000);
INSERT 0 1000000
db=# CREATE INDEX ix1 ON test_btree_dedup (n_unique) WITH (deduplicate_items = OFF);
CREATE INDEX

db=# CREATE INDEX ix2 ON test_btree_dedup (n_unique) WITH (deduplicate_items = ON);
CREATE INDEX

db=# CREATE INDEX ix3 ON test_btree_dedup (n_not_unique) WITH (deduplicate_items = OFF);
CREATE INDEX

db=# CREATE INDEX ix4 ON test_btree_dedup (n_not_unique) WITH (deduplicate_items = ON);
CREATE INDEX
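To see what deduplication buys in this experiment, the sizes of the four test indexes can be compared straight from the catalog. A minimal sketch (ix1 through ix4 are the indexes created above; exact numbers depend on the random data):

-- Compare the on-disk size of the four test indexes
SELECT relname AS index_name,
       pg_size_pretty(pg_relation_size(oid)) AS index_size
FROM pg_class
WHERE relname IN ('ix1', 'ix2', 'ix3', 'ix4')
ORDER BY relname;

The pair on the non-unique column (ix3 vs. ix4) is where the difference shows up, since deduplication only helps when an index contains duplicate key values.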
-- Will lock the table
VACUUM FULL table_name;
CREATE EXTENSION pg_repack;
$ pg_repack -k --table table_name db_name
CREATE INDEX transaction_cancelled_by_ix ON transactions(cancelled_by_user_id);
DROP INDEX transaction_cancelled_by_ix;
CREATE INDEX transaction_cancelled_by_part_ix ON transactions(cancelled_by_user_id)
WHERE cancelled_by_user_id IS NOT NULL;
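Once the partial index is built, the space it occupies can be checked directly; a one-line sketch using the index created above:

-- Size of the new partial index
SELECT pg_size_pretty(pg_relation_size('transaction_cancelled_by_part_ix'));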
-- Find indexed columns with high null_frac
SELECT
    c.oid,
    c.relname AS index,
    pg_size_pretty(pg_relation_size(c.oid)) AS index_size,
    i.indisunique AS unique,
    a.attname AS indexed_column,
    CASE s.null_frac
        WHEN 0 THEN ''
        ELSE to_char(s.null_frac * 100, '999.00%')
    END AS null_frac,
    pg_size_pretty((pg_relation_size(c.oid) * s.null_frac)::bigint) AS expected_saving
    -- Uncomment to include the index definition
    --, ixs.indexdef
FROM
    pg_class c
    JOIN pg_index i ON i.indexrelid = c.oid
    JOIN pg_attribute a ON a.attrelid = c.oid
    JOIN pg_class c_table ON c_table.oid = i.indrelid
    JOIN pg_indexes ixs ON c.relname = ixs.indexname
    LEFT JOIN pg_stats s ON s.tablename = c_table.relname AND a.attname = s.attname
WHERE
    -- Primary key cannot be partial
    NOT i.indisprimary
    -- Exclude already partial indexes
    AND i.indpred IS NULL
    -- Exclude composite indexes
    AND array_length(i.indkey, 1) = 1
    -- Larger than 10MB
    AND pg_relation_size(c.oid) > 10 * 1024 ^ 2
ORDER BY
    pg_relation_size(c.oid) * s.null_frac DESC;
from django.db import models
from django.contrib.auth.models import User


class Transaction(models.Model):
    # ...
    cancelled_by_user = models.ForeignKey(
        to=User,
        null=True,
        on_delete=models.CASCADE,
    )
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User


class Transaction(models.Model):
    # ...
    cancelled_by_user = models.ForeignKey(
        to=User,
        null=True,
        on_delete=models.CASCADE,
        db_index=False,
    )

    class Meta:
        indexes = (
            models.Index(
                fields=('cancelled_by_user_id',),
                name='%(class_name)s_cancelled_by_part_ix',
                condition=Q(cancelled_by_user_id__isnull=False),
            ),
        )