TimescaleDB: pg_stat_statements diagnostic export script

From wikieduonline
Revision as of 09:20, 24 August 2023 by Welcome (talk | contribs)
(diff) ← Older revision | Latest revision (diff) | Newer revision → (diff)
Jump to navigation Jump to search
-- TimescaleDB / PostgreSQL diagnostic export script: dumps configuration,
-- extension, statistics and size information to CSV files in the current
-- directory; the files are archived to a tarball and removed at the end.
-- NOTE(review): pg_stat_statements only returns data when the module is in
-- shared_preload_libraries — confirm on the target server.
create extension if not exists pg_stat_statements;
-- Remove leftovers from a previous run so stale files are not re-archived.
\! rm -f ./pg_*.csv ./timescaledb_information*.csv ./_timescaledb_internal*.csv ./hypertable*.csv
-- Server configuration (all GUC settings).
\copy (select * from pg_settings) to './pg_settings.csv' with (format csv, header, quote '"');
-- Extensions available for installation on this server.
\copy (select * from pg_available_extensions) to './pg_available_extensions.csv' with (format csv, header, quote '"');
-- Installed extensions, plus one synthetic 'pg_version' row that carries the
-- server version string in the extversion column.
\copy (select * from pg_extension union all SELECT null as oid, 'pg_version' as extname, null as extowner, null as extnamespace,null as extrelocatable,split_part(version(),' ',2) as extversion,null as extconfig,null as extcondition) to './pg_extension.csv' with (format csv, header, quote '"');
-- Aggregated per-query execution statistics.
\copy (select * from pg_stat_statements) to './pg_stat_statements.csv' with (format csv, header, quote '"');
-- Current sessions and their state.
\copy (select * from pg_stat_activity) to './pg_stat_activity.csv' with (format csv, header, quote '"');
-- Heap, index and total on-disk size of every relation, largest first
-- (order by 4 = relation_size).
\copy (select n.nspname as schema, c.relname as name, c.relkind, pg_relation_size(c.oid) as relation_size, pg_indexes_size(c.oid) as index_size, pg_total_relation_size(c.oid) as total_size from pg_class c join pg_namespace n on c.relnamespace = n.oid order by 4 desc) to './pg_relation_size.csv' with (format csv, header, quote '"');
-- Per-table vacuum/analyze health: live vs dead tuple counts and the
-- dead/live ratio (tables most in need of a vacuum sort first; tables with
-- no live tuples get a NULL ratio and sort last).
-- Fix: size the relation via its OID (relid, already exposed by
-- pg_stat_user_tables) instead of the original schemaname || '.' || relname
-- concatenation, which fails to cast to regclass for mixed-case or
-- otherwise quoted identifiers.
\copy (select schemaname, relname, pg_size_pretty(pg_relation_size(relid)) as size_p, n_live_tup, n_dead_tup, case when n_live_tup > 0 then round((n_dead_tup::float / n_live_tup::float)::numeric, 4) end as dead_tup_ratio, last_autovacuum, last_autoanalyze, last_vacuum, last_analyze from pg_stat_user_tables order by dead_tup_ratio desc nulls last) to './pg_stat_user_tables.csv' with (format csv, header, quote '"');
-- Per-database size and temp-file usage, prepended to the raw
-- pg_stat_database counters.
\copy (select pg_size_pretty(pg_database_size(datname)) db_size_p, pg_size_pretty(temp_bytes) temp_size_p, db.* from pg_stat_database db) to './pg_stat_database.csv' with (format csv, header, quote '"');
-- TimescaleDB catalog views: chunks, compression settings, continuous
-- aggregates, distributed data nodes, dimensions, hypertables and the
-- background-job tables (definitions, errors, run statistics).
\copy (select * from timescaledb_information.chunks) to './timescaledb_information.chunks.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.compression_settings) to './timescaledb_information.compression_settings.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.continuous_aggregates) to './timescaledb_information.continuous_aggregates.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.data_nodes) to './timescaledb_information.data_nodes.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.dimensions) to './timescaledb_information.dimensions.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.hypertables) to './timescaledb_information.hypertables.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.jobs) to './timescaledb_information.jobs.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.job_errors) to './timescaledb_information.job_errors.csv' with (format csv, header, quote '"');
\copy (select * from timescaledb_information.job_stats) to './timescaledb_information.job_stats.csv' with (format csv, header, quote '"');
-- NOTE(review): _timescaledb_internal objects are undocumented internals and
-- differ between TimescaleDB versions — confirm these exist on the target
-- version before relying on this script.
\copy (select * from _timescaledb_internal.compressed_chunk_stats) to './_timescaledb_internal.compressed_chunk_stats.csv' with (format csv, header, quote '"');
\copy (select * from _timescaledb_internal.hypertable_chunk_local_size) to './_timescaledb_internal.hypertable_chunk_local_size.csv' with (format csv, header, quote '"');
-- Per-chunk detail for every hypertable: compression state, time range and
-- the table/index/toast byte breakdown reported by chunks_detailed_size(),
-- grouped by schema with the largest chunks first.
-- Fix: the original ended with "order by 1, 5 desc", sorting descending on
-- column 5 (chunk_size_p, a pg_size_pretty TEXT value), which orders
-- lexically ("8 kB" sorts after "16 MB"); sort on the numeric
-- d.total_bytes instead and name the columns rather than using positions.
\copy (select h.hypertable_schema, h.hypertable_name, d.chunk_name, d.is_compressed, pg_size_pretty(d.total_bytes) chunk_size_p, d.range_start, d.range_end, d.total_bytes, d.table_bytes, d.index_bytes, d.toast_bytes from timescaledb_information.hypertables h join lateral (select c.chunk_name, c.is_compressed, c.range_start, c.range_end, f.total_bytes, f.table_bytes, f.index_bytes, f.toast_bytes from chunks_detailed_size(concat(h.hypertable_schema::text,'.',h.hypertable_name::text)) f join timescaledb_information.chunks c on f.chunk_name = c.chunk_name ) d on true order by h.hypertable_schema, d.total_bytes desc) to './timescaledb_information.chunks_detailed_size.csv' with (format csv, header, quote '"');
-- One row per hypertable with its column definitions and index DDL
-- aggregated as JSON, for offline schema inspection.
\copy (select t1.table_schema, t1.table_name, t1.columns, t2.indexes from (select cl.table_schema, cl.table_name as table_name, json_agg(jsonb_build_object(cl.column_name, cl.data_type) order by cl.column_name) as columns from information_schema.columns cl inner join timescaledb_information.hypertables ht on (ht.hypertable_schema=cl.table_schema and ht.hypertable_name=cl.table_name) group by cl.table_schema, cl.table_name) t1 left join (select id.schemaname, id.tablename as table_name, json_agg(jsonb_build_object(id.indexname, id.indexdef) order by id.indexname) as indexes from pg_indexes id inner join timescaledb_information.hypertables ht on (ht.hypertable_schema=id.schemaname and ht.hypertable_name=id.tablename) group by id.schemaname, id.tablename) t2 on (t2.schemaname=t1.table_schema and t1.table_name=t2.table_name)) to './hypertable_definitions.csv' with (format csv, header, quote '"');
-- Role inventory: login/superuser/creation attributes plus each role's
-- memberships resolved via pg_auth_members.
\copy (SELECT r.rolname AS role_name, r.rolcanlogin as can_login, r.rolsuper AS superuser, r.rolinherit AS inherits, r.rolcreaterole AS create_role, r.rolcreatedb AS create_db, ARRAY( SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) AS member_of FROM pg_catalog.pg_roles r ORDER BY r.rolname) to './pg_roles.csv' with (format csv, header, quote '"');
-- Bundle every exported CSV into a single tarball, then delete the
-- intermediate CSV files.
\! tar czvf timescaledb_information.tar.gz ./pg_*.csv ./timescaledb_information*.csv ./_timescaledb_internal*.csv ./hypertable*.csv
\! rm -f ./pg_*.csv ./timescaledb_information*.csv ./_timescaledb_internal*.csv ./hypertable*.csv


See also

Advertising: