mirror of
https://github.com/postgres/postgres.git
synced 2026-04-15 22:10:45 -04:00
Change default of max_locks_per_transaction to 128
The previous commits reduced the amount of memory available for locks by eliminating the "safety margins" and by settling the split between LOCK and PROCLOCK tables at startup. The allocation is now more deterministic, but it also means that you often hit one of the limits sooner than before. To compensate for that, bump up max_locks_per_transaction from 64 to 128. With that there is a little more space in both hash tables than what was the effective maximum size for either table before the previous commits. This only changes the default, so if you had changed max_locks_per_transaction in postgresql.conf, you will still have fewer locks available than before for the same setting value. This should be noted in the release notes. A good rule of thumb is that if you double max_locks_per_transaction, you should be able to get as many locks as before. Reviewed-by: Ashutosh Bapat <ashutosh.bapat.oss@gmail.com> Reviewed-by: Matthias van de Meent <boekewurm+postgres@gmail.com> Discussion: https://www.postgresql.org/message-id/e07be2ba-856b-4ff5-8313-8b58b6b4e4d0@iki.fi
This commit is contained in:
parent
e1ad034809
commit
79534f9065
5 changed files with 6 additions and 6 deletions
|
|
@ -11470,7 +11470,7 @@ dynamic_library_path = '/usr/local/lib/postgresql:$libdir'
|
|||
can lock more objects as long as the locks of all transactions
|
||||
fit in the lock table. This is <emphasis>not</emphasis> the number of
|
||||
rows that can be locked; that value is unlimited. The default,
|
||||
64, has historically proven sufficient, but you might need to
|
||||
128, has historically proven sufficient, but you might need to
|
||||
raise this value if you have queries that touch many different
|
||||
tables in a single transaction, e.g., query of a parent table with
|
||||
many children. This parameter can only be set at server start.
|
||||
|
|
|
|||
|
|
@ -593,7 +593,7 @@ InitializeFastPathLocks(void)
|
|||
* value at FP_LOCK_GROUPS_PER_BACKEND_MAX and insist the value is at
|
||||
* least 1.
|
||||
*
|
||||
* The default max_locks_per_transaction = 64 means 4 groups by default.
|
||||
* The default max_locks_per_transaction = 128 means 8 groups by default.
|
||||
*/
|
||||
FastPathLockGroupsPerBackend =
|
||||
Max(Min(pg_nextpower2_32(max_locks_per_xact) / FP_LOCK_SLOTS_PER_GROUP,
|
||||
|
|
|
|||
|
|
@ -1979,7 +1979,7 @@
|
|||
short_desc => 'Sets the maximum number of locks per transaction.',
|
||||
long_desc => 'The shared lock table is sized on the assumption that at most "max_locks_per_transaction" objects per server process or prepared transaction will need to be locked at any one time.',
|
||||
variable => 'max_locks_per_xact',
|
||||
boot_val => '64',
|
||||
boot_val => '128',
|
||||
min => '10',
|
||||
max => 'INT_MAX',
|
||||
},
|
||||
|
|
|
|||
|
|
@ -856,7 +856,7 @@
|
|||
#------------------------------------------------------------------------------
|
||||
|
||||
#deadlock_timeout = 1s
|
||||
#max_locks_per_transaction = 64 # min 10
|
||||
#max_locks_per_transaction = 128 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
|
|
|
|||
|
|
@ -722,7 +722,7 @@ GuessControlValues(void)
|
|||
ControlFile.max_wal_senders = 10;
|
||||
ControlFile.max_worker_processes = 8;
|
||||
ControlFile.max_prepared_xacts = 0;
|
||||
ControlFile.max_locks_per_xact = 64;
|
||||
ControlFile.max_locks_per_xact = 128;
|
||||
|
||||
ControlFile.maxAlign = MAXIMUM_ALIGNOF;
|
||||
ControlFile.floatFormat = FLOATFORMAT_VALUE;
|
||||
|
|
@ -931,7 +931,7 @@ RewriteControlFile(void)
|
|||
ControlFile.max_wal_senders = 10;
|
||||
ControlFile.max_worker_processes = 8;
|
||||
ControlFile.max_prepared_xacts = 0;
|
||||
ControlFile.max_locks_per_xact = 64;
|
||||
ControlFile.max_locks_per_xact = 128;
|
||||
|
||||
/* The control file gets flushed here. */
|
||||
update_controlfile(".", &ControlFile, true);
|
||||
|
|
|
|||
Loading…
Reference in a new issue