granicus.if.org Git - postgresql/commitdiff
Remove more volatile qualifiers.
author Robert Haas <rhaas@postgresql.org>
Tue, 6 Oct 2015 19:45:02 +0000 (15:45 -0400)
committer Robert Haas <rhaas@postgresql.org>
Tue, 6 Oct 2015 19:45:02 +0000 (15:45 -0400)
Prior to commit 0709b7ee72e4bc71ad07b7120acd117265ab51d0, access to
variables within a spinlock-protected critical section had to be done
through a volatile pointer, but that should no longer be necessary.
This continues work begun in df4077cda2eae3eb4a5cf387da0c1e7616e73204
and 6ba4ecbf477e0b25dd7bde1b0c4e07fc2da19348.

Thomas Munro and Michael Paquier
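
For readers who wonder what the removed pattern looked like, here is a minimal, self-contained C sketch. It is not PostgreSQL source: the trimmed-down CheckpointerShmemStruct and the SpinLockAcquire/SpinLockRelease stand-ins (GCC/Clang-style compiler barriers) are assumptions made for illustration. Before commit 0709b7ee the spinlock macros were not compiler barriers, so callers had to reach the protected fields through a volatile-qualified pointer to keep the compiler from moving those loads and stores out of the critical section; with the barrier now built into the macros, the plain direct-access form is equally safe.

#include <stdio.h>

/* Trimmed-down stand-in for the shared-memory struct used in checkpointer.c. */
typedef struct
{
	int		ckpt_lck;		/* stand-in for a real spinlock */
	int		ckpt_started;
	int		ckpt_done;
} CheckpointerShmemStruct;

static CheckpointerShmemStruct shmem_storage;
static CheckpointerShmemStruct *CheckpointerShmem = &shmem_storage;

/*
 * Hypothetical spinlock functions modelling the post-0709b7ee behaviour:
 * acquire and release each contain a compiler barrier, so protected
 * accesses cannot be reordered across them at compile time.
 */
static void
SpinLockAcquire(int *lock)
{
	*lock = 1;
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
}

static void
SpinLockRelease(int *lock)
{
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
	*lock = 0;
}

int
main(void)
{
	/* Old style: force ordering by going through a volatile pointer. */
	volatile CheckpointerShmemStruct *cps = CheckpointerShmem;

	SpinLockAcquire((int *) &cps->ckpt_lck);
	cps->ckpt_started++;
	SpinLockRelease((int *) &cps->ckpt_lck);

	/* New style: the barriers make the volatile detour unnecessary. */
	SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
	CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
	SpinLockRelease(&CheckpointerShmem->ckpt_lck);

	printf("started=%d done=%d\n",
	       CheckpointerShmem->ckpt_started, CheckpointerShmem->ckpt_done);
	return 0;
}

Note that the volatile qualifier only ever constrained the compiler; mutual exclusion and CPU-level ordering still come from the spinlock itself, which is why dropping the qualifier is safe once the macros act as barriers.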

src/backend/postmaster/checkpointer.c
src/backend/replication/logical/logical.c
src/backend/replication/slot.c
src/backend/replication/walreceiver.c
src/backend/replication/walreceiverfuncs.c
src/backend/replication/walsender.c

diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 3b3a09ef8860b0497a415cec0ca1d823aaa78841..dc5b85615506717979870b56021d8e09565a0601 100644
@@ -288,13 +288,10 @@ CheckpointerMain(void)
                /* Warn any waiting backends that the checkpoint failed. */
                if (ckpt_active)
                {
-                       /* use volatile pointer to prevent code rearrangement */
-                       volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
-
-                       SpinLockAcquire(&cps->ckpt_lck);
-                       cps->ckpt_failed++;
-                       cps->ckpt_done = cps->ckpt_started;
-                       SpinLockRelease(&cps->ckpt_lck);
+                       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+                       CheckpointerShmem->ckpt_failed++;
+                       CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
+                       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
                        ckpt_active = false;
                }
@@ -428,9 +425,6 @@ CheckpointerMain(void)
                        bool            ckpt_performed = false;
                        bool            do_restartpoint;
 
-                       /* use volatile pointer to prevent code rearrangement */
-                       volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
-
                        /*
                         * Check if we should perform a checkpoint or a restartpoint. As a
                         * side-effect, RecoveryInProgress() initializes TimeLineID if
@@ -443,11 +437,11 @@ CheckpointerMain(void)
                         * checkpoint we should perform, and increase the started-counter
                         * to acknowledge that we've started a new checkpoint.
                         */
-                       SpinLockAcquire(&cps->ckpt_lck);
-                       flags |= cps->ckpt_flags;
-                       cps->ckpt_flags = 0;
-                       cps->ckpt_started++;
-                       SpinLockRelease(&cps->ckpt_lck);
+                       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+                       flags |= CheckpointerShmem->ckpt_flags;
+                       CheckpointerShmem->ckpt_flags = 0;
+                       CheckpointerShmem->ckpt_started++;
+                       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
                        /*
                         * The end-of-recovery checkpoint is a real checkpoint that's
@@ -505,9 +499,9 @@ CheckpointerMain(void)
                        /*
                         * Indicate checkpoint completion to any waiting backends.
                         */
-                       SpinLockAcquire(&cps->ckpt_lck);
-                       cps->ckpt_done = cps->ckpt_started;
-                       SpinLockRelease(&cps->ckpt_lck);
+                       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+                       CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
+                       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
                        if (ckpt_performed)
                        {
@@ -957,8 +951,6 @@ CheckpointerShmemInit(void)
 void
 RequestCheckpoint(int flags)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
        int                     ntries;
        int                     old_failed,
                                old_started;
@@ -992,13 +984,13 @@ RequestCheckpoint(int flags)
         * a "stronger" request by another backend.  The flag senses must be
         * chosen to make this work!
         */
-       SpinLockAcquire(&cps->ckpt_lck);
+       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
 
-       old_failed = cps->ckpt_failed;
-       old_started = cps->ckpt_started;
-       cps->ckpt_flags |= flags;
+       old_failed = CheckpointerShmem->ckpt_failed;
+       old_started = CheckpointerShmem->ckpt_started;
+       CheckpointerShmem->ckpt_flags |= flags;
 
-       SpinLockRelease(&cps->ckpt_lck);
+       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
        /*
         * Send signal to request checkpoint.  It's possible that the checkpointer
@@ -1046,9 +1038,9 @@ RequestCheckpoint(int flags)
                /* Wait for a new checkpoint to start. */
                for (;;)
                {
-                       SpinLockAcquire(&cps->ckpt_lck);
-                       new_started = cps->ckpt_started;
-                       SpinLockRelease(&cps->ckpt_lck);
+                       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+                       new_started = CheckpointerShmem->ckpt_started;
+                       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
                        if (new_started != old_started)
                                break;
@@ -1064,10 +1056,10 @@ RequestCheckpoint(int flags)
                {
                        int                     new_done;
 
-                       SpinLockAcquire(&cps->ckpt_lck);
-                       new_done = cps->ckpt_done;
-                       new_failed = cps->ckpt_failed;
-                       SpinLockRelease(&cps->ckpt_lck);
+                       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+                       new_done = CheckpointerShmem->ckpt_done;
+                       new_failed = CheckpointerShmem->ckpt_failed;
+                       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
                        if (new_done - new_started >= 0)
                                break;
@@ -1368,15 +1360,13 @@ UpdateSharedMemoryConfig(void)
 bool
 FirstCallSinceLastCheckpoint(void)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
        static int      ckpt_done = 0;
        int                     new_done;
        bool            FirstCall = false;
 
-       SpinLockAcquire(&cps->ckpt_lck);
-       new_done = cps->ckpt_done;
-       SpinLockRelease(&cps->ckpt_lck);
+       SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+       new_done = CheckpointerShmem->ckpt_done;
+       SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
        if (new_done != ckpt_done)
                FirstCall = true;
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 5a07e1d9a69c946e2a57988aba79f9d2264b8abb..1ce90811cc88aaae0cf592f70ed9c465cb9da1be 100644
@@ -848,16 +848,13 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
                bool            updated_xmin = false;
                bool            updated_restart = false;
 
-               /* use volatile pointer to prevent code rearrangement */
-               volatile ReplicationSlot *slot = MyReplicationSlot;
+               SpinLockAcquire(&MyReplicationSlot->mutex);
 
-               SpinLockAcquire(&slot->mutex);
-
-               slot->data.confirmed_flush = lsn;
+               MyReplicationSlot->data.confirmed_flush = lsn;
 
                /* if were past the location required for bumping xmin, do so */
-               if (slot->candidate_xmin_lsn != InvalidXLogRecPtr &&
-                       slot->candidate_xmin_lsn <= lsn)
+               if (MyReplicationSlot->candidate_xmin_lsn != InvalidXLogRecPtr &&
+                       MyReplicationSlot->candidate_xmin_lsn <= lsn)
                {
                        /*
                         * We have to write the changed xmin to disk *before* we change
@@ -868,28 +865,28 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
                         * ->effective_xmin once the new state is synced to disk. After a
                         * crash ->effective_xmin is set to ->xmin.
                         */
-                       if (TransactionIdIsValid(slot->candidate_catalog_xmin) &&
-                               slot->data.catalog_xmin != slot->candidate_catalog_xmin)
+                       if (TransactionIdIsValid(MyReplicationSlot->candidate_catalog_xmin) &&
+                               MyReplicationSlot->data.catalog_xmin != MyReplicationSlot->candidate_catalog_xmin)
                        {
-                               slot->data.catalog_xmin = slot->candidate_catalog_xmin;
-                               slot->candidate_catalog_xmin = InvalidTransactionId;
-                               slot->candidate_xmin_lsn = InvalidXLogRecPtr;
+                               MyReplicationSlot->data.catalog_xmin = MyReplicationSlot->candidate_catalog_xmin;
+                               MyReplicationSlot->candidate_catalog_xmin = InvalidTransactionId;
+                               MyReplicationSlot->candidate_xmin_lsn = InvalidXLogRecPtr;
                                updated_xmin = true;
                        }
                }
 
-               if (slot->candidate_restart_valid != InvalidXLogRecPtr &&
-                       slot->candidate_restart_valid <= lsn)
+               if (MyReplicationSlot->candidate_restart_valid != InvalidXLogRecPtr &&
+                       MyReplicationSlot->candidate_restart_valid <= lsn)
                {
-                       Assert(slot->candidate_restart_lsn != InvalidXLogRecPtr);
+                       Assert(MyReplicationSlot->candidate_restart_lsn != InvalidXLogRecPtr);
 
-                       slot->data.restart_lsn = slot->candidate_restart_lsn;
-                       slot->candidate_restart_lsn = InvalidXLogRecPtr;
-                       slot->candidate_restart_valid = InvalidXLogRecPtr;
+                       MyReplicationSlot->data.restart_lsn = MyReplicationSlot->candidate_restart_lsn;
+                       MyReplicationSlot->candidate_restart_lsn = InvalidXLogRecPtr;
+                       MyReplicationSlot->candidate_restart_valid = InvalidXLogRecPtr;
                        updated_restart = true;
                }
 
-               SpinLockRelease(&slot->mutex);
+               SpinLockRelease(&MyReplicationSlot->mutex);
 
                /* first write new xmin to disk, so we know whats up after a crash */
                if (updated_xmin || updated_restart)
@@ -907,9 +904,9 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
                 */
                if (updated_xmin)
                {
-                       SpinLockAcquire(&slot->mutex);
-                       slot->effective_catalog_xmin = slot->data.catalog_xmin;
-                       SpinLockRelease(&slot->mutex);
+                       SpinLockAcquire(&MyReplicationSlot->mutex);
+                       MyReplicationSlot->effective_catalog_xmin = MyReplicationSlot->data.catalog_xmin;
+                       SpinLockRelease(&MyReplicationSlot->mutex);
 
                        ReplicationSlotsComputeRequiredXmin(false);
                        ReplicationSlotsComputeRequiredLSN();
@@ -917,10 +914,8 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
        }
        else
        {
-               volatile ReplicationSlot *slot = MyReplicationSlot;
-
-               SpinLockAcquire(&slot->mutex);
-               slot->data.confirmed_flush = lsn;
-               SpinLockRelease(&slot->mutex);
+               SpinLockAcquire(&MyReplicationSlot->mutex);
+               MyReplicationSlot->data.confirmed_flush = lsn;
+               SpinLockRelease(&MyReplicationSlot->mutex);
        }
 }
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index a4b86e039e390ae28a456374a062b124e7698050..92ec6326de12069b05dfd17b7be5253d10b333b6 100644
@@ -288,15 +288,11 @@ ReplicationSlotCreate(const char *name, bool db_specific,
        slot->in_use = true;
 
        /* We can now mark the slot active, and that makes it our slot. */
-       {
-               volatile ReplicationSlot *vslot = slot;
-
-               SpinLockAcquire(&slot->mutex);
-               Assert(vslot->active_pid == 0);
-               vslot->active_pid = MyProcPid;
-               SpinLockRelease(&slot->mutex);
-               MyReplicationSlot = slot;
-       }
+       SpinLockAcquire(&slot->mutex);
+       Assert(slot->active_pid == 0);
+       slot->active_pid = MyProcPid;
+       SpinLockRelease(&slot->mutex);
+       MyReplicationSlot = slot;
 
        LWLockRelease(ReplicationSlotControlLock);
 
@@ -329,12 +325,10 @@ ReplicationSlotAcquire(const char *name)
 
                if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
                {
-                       volatile ReplicationSlot *vslot = s;
-
                        SpinLockAcquire(&s->mutex);
-                       active_pid = vslot->active_pid;
+                       active_pid = s->active_pid;
                        if (active_pid == 0)
-                               vslot->active_pid = MyProcPid;
+                               s->active_pid = MyProcPid;
                        SpinLockRelease(&s->mutex);
                        slot = s;
                        break;
@@ -380,10 +374,8 @@ ReplicationSlotRelease(void)
        else
        {
                /* Mark slot inactive.  We're not freeing it, just disconnecting. */
-               volatile ReplicationSlot *vslot = slot;
-
                SpinLockAcquire(&slot->mutex);
-               vslot->active_pid = 0;
+               slot->active_pid = 0;
                SpinLockRelease(&slot->mutex);
        }
 
@@ -459,11 +451,10 @@ ReplicationSlotDropAcquired(void)
        }
        else
        {
-               volatile ReplicationSlot *vslot = slot;
                bool            fail_softly = slot->data.persistency == RS_EPHEMERAL;
 
                SpinLockAcquire(&slot->mutex);
-               vslot->active_pid = 0;
+               slot->active_pid = 0;
                SpinLockRelease(&slot->mutex);
 
                ereport(fail_softly ? WARNING : ERROR,
@@ -533,16 +524,13 @@ ReplicationSlotSave(void)
 void
 ReplicationSlotMarkDirty(void)
 {
+       ReplicationSlot *slot = MyReplicationSlot;
        Assert(MyReplicationSlot != NULL);
 
-       {
-               volatile ReplicationSlot *vslot = MyReplicationSlot;
-
-               SpinLockAcquire(&vslot->mutex);
-               MyReplicationSlot->just_dirtied = true;
-               MyReplicationSlot->dirty = true;
-               SpinLockRelease(&vslot->mutex);
-       }
+       SpinLockAcquire(&slot->mutex);
+       MyReplicationSlot->just_dirtied = true;
+       MyReplicationSlot->dirty = true;
+       SpinLockRelease(&slot->mutex);
 }
 
 /*
@@ -557,13 +545,9 @@ ReplicationSlotPersist(void)
        Assert(slot != NULL);
        Assert(slot->data.persistency != RS_PERSISTENT);
 
-       {
-               volatile ReplicationSlot *vslot = slot;
-
-               SpinLockAcquire(&slot->mutex);
-               vslot->data.persistency = RS_PERSISTENT;
-               SpinLockRelease(&slot->mutex);
-       }
+       SpinLockAcquire(&slot->mutex);
+       slot->data.persistency = RS_PERSISTENT;
+       SpinLockRelease(&slot->mutex);
 
        ReplicationSlotMarkDirty();
        ReplicationSlotSave();
@@ -593,14 +577,10 @@ ReplicationSlotsComputeRequiredXmin(bool already_locked)
                if (!s->in_use)
                        continue;
 
-               {
-                       volatile ReplicationSlot *vslot = s;
-
-                       SpinLockAcquire(&s->mutex);
-                       effective_xmin = vslot->effective_xmin;
-                       effective_catalog_xmin = vslot->effective_catalog_xmin;
-                       SpinLockRelease(&s->mutex);
-               }
+               SpinLockAcquire(&s->mutex);
+               effective_xmin = s->effective_xmin;
+               effective_catalog_xmin = s->effective_catalog_xmin;
+               SpinLockRelease(&s->mutex);
 
                /* check the data xmin */
                if (TransactionIdIsValid(effective_xmin) &&
@@ -641,13 +621,9 @@ ReplicationSlotsComputeRequiredLSN(void)
                if (!s->in_use)
                        continue;
 
-               {
-                       volatile ReplicationSlot *vslot = s;
-
-                       SpinLockAcquire(&s->mutex);
-                       restart_lsn = vslot->data.restart_lsn;
-                       SpinLockRelease(&s->mutex);
-               }
+               SpinLockAcquire(&s->mutex);
+               restart_lsn = s->data.restart_lsn;
+               SpinLockRelease(&s->mutex);
 
                if (restart_lsn != InvalidXLogRecPtr &&
                        (min_required == InvalidXLogRecPtr ||
@@ -684,7 +660,7 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
 
        for (i = 0; i < max_replication_slots; i++)
        {
-               volatile ReplicationSlot *s;
+               ReplicationSlot *s;
                XLogRecPtr      restart_lsn;
 
                s = &ReplicationSlotCtl->replication_slots[i];
@@ -733,7 +709,7 @@ ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive)
        LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
        for (i = 0; i < max_replication_slots; i++)
        {
-               volatile ReplicationSlot *s;
+               ReplicationSlot *s;
 
                s = &ReplicationSlotCtl->replication_slots[i];
 
@@ -1023,14 +999,10 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
        bool            was_dirty;
 
        /* first check whether there's something to write out */
-       {
-               volatile ReplicationSlot *vslot = slot;
-
-               SpinLockAcquire(&vslot->mutex);
-               was_dirty = vslot->dirty;
-               vslot->just_dirtied = false;
-               SpinLockRelease(&vslot->mutex);
-       }
+       SpinLockAcquire(&slot->mutex);
+       was_dirty = slot->dirty;
+       slot->just_dirtied = false;
+       SpinLockRelease(&slot->mutex);
 
        /* and don't do anything if there's nothing to write */
        if (!was_dirty)
@@ -1124,14 +1096,10 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
         * Successfully wrote, unset dirty bit, unless somebody dirtied again
         * already.
         */
-       {
-               volatile ReplicationSlot *vslot = slot;
-
-               SpinLockAcquire(&vslot->mutex);
-               if (!vslot->just_dirtied)
-                       vslot->dirty = false;
-               SpinLockRelease(&vslot->mutex);
-       }
+       SpinLockAcquire(&slot->mutex);
+       if (!slot->just_dirtied)
+               slot->dirty = false;
+       SpinLockRelease(&slot->mutex);
 
        LWLockRelease(slot->io_in_progress_lock);
 }
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 41e57f24397348c183d3a1eab77455042644375b..183a3a553db3904de9be417d0ab1941e096d0240 100644
@@ -192,9 +192,7 @@ WalReceiverMain(void)
        TimeLineID      startpointTLI;
        TimeLineID      primaryTLI;
        bool            first_stream;
-
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        TimestampTz last_recv_timestamp;
        bool            ping_sent;
 
@@ -559,8 +557,7 @@ WalReceiverMain(void)
 static void
 WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        int                     state;
 
        SpinLockAcquire(&walrcv->mutex);
@@ -693,8 +690,7 @@ WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
 static void
 WalRcvDie(int code, Datum arg)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
 
        /* Ensure that all WAL records received are flushed to disk */
        XLogWalRcvFlush(true);
@@ -974,8 +970,7 @@ XLogWalRcvFlush(bool dying)
 {
        if (LogstreamResult.Flush < LogstreamResult.Write)
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalRcvData *walrcv = WalRcv;
+               WalRcvData *walrcv = WalRcv;
 
                issue_xlog_fsync(recvFile, recvSegNo);
 
@@ -1179,8 +1174,7 @@ XLogWalRcvSendHSFeedback(bool immed)
 static void
 ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
 
        TimestampTz lastMsgReceiptTime = GetCurrentTimestamp();
 
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index f77a790fd878527c667381d344b40ed3917e349d..4452f25d72a86e5536a80772130e57bd41bb3aa6 100644
@@ -72,8 +72,7 @@ WalRcvShmemInit(void)
 bool
 WalRcvRunning(void)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        WalRcvState state;
        pg_time_t       startTime;
 
@@ -118,8 +117,7 @@ WalRcvRunning(void)
 bool
 WalRcvStreaming(void)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        WalRcvState state;
        pg_time_t       startTime;
 
@@ -165,8 +163,7 @@ WalRcvStreaming(void)
 void
 ShutdownWalRcv(void)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        pid_t           walrcvpid = 0;
 
        /*
@@ -227,8 +224,7 @@ void
 RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
                                         const char *slotname)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        bool            launch = false;
        pg_time_t       now = (pg_time_t) time(NULL);
 
@@ -298,8 +294,7 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
 XLogRecPtr
 GetWalRcvWriteRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
        XLogRecPtr      recptr;
 
        SpinLockAcquire(&walrcv->mutex);
@@ -320,9 +315,7 @@ GetWalRcvWriteRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
 int
 GetReplicationApplyDelay(void)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
-
+       WalRcvData *walrcv = WalRcv;
        XLogRecPtr      receivePtr;
        XLogRecPtr      replayPtr;
 
@@ -359,8 +352,7 @@ GetReplicationApplyDelay(void)
 int
 GetReplicationTransferLatency(void)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalRcvData *walrcv = WalRcv;
+       WalRcvData *walrcv = WalRcv;
 
        TimestampTz lastMsgSendTime;
        TimestampTz lastMsgReceiptTime;
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index c95fe75a7215151ec4049b9d6731c9b52c9c383d..c6043cd3ce4ed6a5fec977b3b4b57efc26e58b3a 100644
@@ -641,8 +641,7 @@ StartReplication(StartReplicationCmd *cmd)
 
                /* Initialize shared memory status, too */
                {
-                       /* use volatile pointer to prevent code rearrangement */
-                       volatile WalSnd *walsnd = MyWalSnd;
+                       WalSnd *walsnd = MyWalSnd;
 
                        SpinLockAcquire(&walsnd->mutex);
                        walsnd->sentPtr = sentPtr;
@@ -990,8 +989,7 @@ StartLogicalReplication(StartReplicationCmd *cmd)
 
        /* Also update the sent position status in shared memory */
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = MyWalSnd;
+               WalSnd *walsnd = MyWalSnd;
 
                SpinLockAcquire(&walsnd->mutex);
                walsnd->sentPtr = MyReplicationSlot->data.restart_lsn;
@@ -1494,9 +1492,7 @@ static void
 PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
 {
        bool            changed = false;
-
-       /* use volatile pointer to prevent code rearrangement */
-       volatile ReplicationSlot *slot = MyReplicationSlot;
+       ReplicationSlot *slot = MyReplicationSlot;
 
        Assert(lsn != InvalidXLogRecPtr);
        SpinLockAcquire(&slot->mutex);
@@ -1554,8 +1550,7 @@ ProcessStandbyReplyMessage(void)
         * standby.
         */
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = MyWalSnd;
+               WalSnd *walsnd = MyWalSnd;
 
                SpinLockAcquire(&walsnd->mutex);
                walsnd->write = writePtr;
@@ -1584,7 +1579,7 @@ static void
 PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin)
 {
        bool            changed = false;
-       volatile ReplicationSlot *slot = MyReplicationSlot;
+       ReplicationSlot *slot = MyReplicationSlot;
 
        SpinLockAcquire(&slot->mutex);
        MyPgXact->xmin = InvalidTransactionId;
@@ -1934,8 +1929,7 @@ InitWalSenderSlot(void)
         */
        for (i = 0; i < max_wal_senders; i++)
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+               WalSnd *walsnd = &WalSndCtl->walsnds[i];
 
                SpinLockAcquire(&walsnd->mutex);
 
@@ -2145,8 +2139,7 @@ retry:
         */
        if (am_cascading_walsender)
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = MyWalSnd;
+               WalSnd *walsnd = MyWalSnd;
                bool            reload;
 
                SpinLockAcquire(&walsnd->mutex);
@@ -2384,8 +2377,7 @@ XLogSendPhysical(void)
 
        /* Update shared memory status */
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = MyWalSnd;
+               WalSnd *walsnd = MyWalSnd;
 
                SpinLockAcquire(&walsnd->mutex);
                walsnd->sentPtr = sentPtr;
@@ -2447,8 +2439,7 @@ XLogSendLogical(void)
 
        /* Update shared memory status */
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = MyWalSnd;
+               WalSnd *walsnd = MyWalSnd;
 
                SpinLockAcquire(&walsnd->mutex);
                walsnd->sentPtr = sentPtr;
@@ -2539,8 +2530,7 @@ WalSndRqstFileReload(void)
 
        for (i = 0; i < max_wal_senders; i++)
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+               WalSnd *walsnd = &WalSndCtl->walsnds[i];
 
                if (walsnd->pid == 0)
                        continue;
@@ -2692,8 +2682,7 @@ WalSndWakeup(void)
 void
 WalSndSetState(WalSndState state)
 {
-       /* use volatile pointer to prevent code rearrangement */
-       volatile WalSnd *walsnd = MyWalSnd;
+       WalSnd *walsnd = MyWalSnd;
 
        Assert(am_walsender);
 
@@ -2777,8 +2766,7 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
 
        for (i = 0; i < max_wal_senders; i++)
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+               WalSnd *walsnd = &WalSndCtl->walsnds[i];
                XLogRecPtr      sentPtr;
                XLogRecPtr      write;
                XLogRecPtr      flush;
@@ -2934,8 +2922,7 @@ GetOldestWALSendPointer(void)
 
        for (i = 0; i < max_wal_senders; i++)
        {
-               /* use volatile pointer to prevent code rearrangement */
-               volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+               WalSnd *walsnd = &WalSndCtl->walsnds[i];
                XLogRecPtr      recptr;
 
                if (walsnd->pid == 0)