Size newFree;
void *newSpace;
- /* use volatile pointer to prevent code rearrangement */
- volatile PGShmemHeader *shmemseghdr = ShmemSegHdr;
-
/*
* ensure all space is adequately aligned.
*/
size = MAXALIGN(size);
- Assert(shmemseghdr != NULL);
+ Assert(ShmemSegHdr != NULL);
SpinLockAcquire(ShmemLock);
- newStart = shmemseghdr->freeoffset;
+ newStart = ShmemSegHdr->freeoffset;
/* extra alignment for large requests, since they are probably buffers */
if (size >= BLCKSZ)
newStart = BUFFERALIGN(newStart);
newFree = newStart + size;
- if (newFree <= shmemseghdr->totalsize)
+ if (newFree <= ShmemSegHdr->totalsize)
{
newSpace = (void *) ((char *) ShmemBase + newStart);
- shmemseghdr->freeoffset = newFree;
+ ShmemSegHdr->freeoffset = newFree;
}
else
newSpace = NULL;
}
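What this hunk (and the rest of the patch) is doing: the old code took a volatile-qualified alias of ShmemSegHdr so the compiler could not cache or reorder accesses to the shared header around the spinlock calls. Once SpinLockAcquire() and SpinLockRelease() themselves act as compiler barriers, the plain global pointer is safe to use directly. A minimal sketch of the before/after pattern, with hypothetical names and a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct
    {
        size_t      freeoffset;
    } Header;

    static Header *shared_hdr;      /* set up elsewhere to point into shared memory */
    static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Old pattern: a volatile-qualified alias keeps the compiler from caching
     * shared_hdr->freeoffset in a register across the lock/unlock calls. */
    static size_t
    reserve_old(size_t size)
    {
        volatile Header *hdr = shared_hdr;
        size_t      start;

        pthread_mutex_lock(&shared_lock);
        start = hdr->freeoffset;
        hdr->freeoffset = start + size;
        pthread_mutex_unlock(&shared_lock);
        return start;
    }

    /* New pattern: once the lock and unlock calls are known to be compiler
     * barriers, the shared structure can be touched through the plain pointer. */
    static size_t
    reserve_new(size_t size)
    {
        size_t      start;

        pthread_mutex_lock(&shared_lock);
        start = shared_hdr->freeoffset;
        shared_hdr->freeoffset = start + size;
        pthread_mutex_unlock(&shared_lock);
        return start;
    }

The volatile alias only ever constrained the compiler, not the CPU; pushing the barrier into the lock and unlock primitives covers the same ground without the qualifier spreading into every access of the shared structure.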
/* Update current value of maxMsgNum using spinlock */
- {
- /* use volatile pointer to prevent code rearrangement */
- volatile SISeg *vsegP = segP;
-
- SpinLockAcquire(&vsegP->msgnumLock);
- vsegP->maxMsgNum = max;
- SpinLockRelease(&vsegP->msgnumLock);
- }
+ SpinLockAcquire(&segP->msgnumLock);
+ segP->maxMsgNum = max;
+ SpinLockRelease(&segP->msgnumLock);
/*
* Now that the maxMsgNum change is globally visible, we give everyone
* a swift kick to make sure they read the newly added messages.
*/
stateP->hasMessages = false;
/* Fetch current value of maxMsgNum using spinlock */
- {
- /* use volatile pointer to prevent code rearrangement */
- volatile SISeg *vsegP = segP;
-
- SpinLockAcquire(&vsegP->msgnumLock);
- max = vsegP->maxMsgNum;
- SpinLockRelease(&vsegP->msgnumLock);
- }
+ SpinLockAcquire(&segP->msgnumLock);
+ max = segP->maxMsgNum;
+ SpinLockRelease(&segP->msgnumLock);
if (stateP->resetState)
{
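The two sinvaladt.c hunks are the writer and reader sides of the same protocol: maxMsgNum is only touched while msgnumLock is held, and (in the code following the first hunk) each backend's hasMessages flag is set after the release, while the reader clears its own flag before the fetch. A rough sketch of that ordering with hypothetical names (a pthread mutex and C11 atomics stand in for the spinlock and the unlocked flag accesses; resetState and the surrounding LWLock handling are omitted):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define NUM_READERS 8

    static pthread_mutex_t msgnum_lock = PTHREAD_MUTEX_INITIALIZER;
    static int  max_msg_num;                        /* shared message counter */
    static atomic_bool has_messages[NUM_READERS];   /* per-reader "go look" flags */

    /* Writer side: publish the new counter value under the lock, then set
     * every reader's flag so it knows there is something new to read. */
    static void
    publish(int new_max)
    {
        pthread_mutex_lock(&msgnum_lock);
        max_msg_num = new_max;
        pthread_mutex_unlock(&msgnum_lock);

        for (int i = 0; i < NUM_READERS; i++)
            atomic_store(&has_messages[i], true);
    }

    /* Reader side: clear our flag first, then fetch the counter under the
     * lock.  A writer that publishes after our fetch sets the flag again,
     * so the update is noticed on the next pass rather than lost. */
    static int
    consume(int self)
    {
        int         max;

        atomic_store(&has_messages[self], false);

        pthread_mutex_lock(&msgnum_lock);
        max = max_msg_num;
        pthread_mutex_unlock(&msgnum_lock);

        return max;
    }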
case HASH_REMOVE:
if (currBucket != NULL)
{
- /* use volatile pointer to prevent code rearrangement */
- volatile HASHHDR *hctlv = hctl;
-
/* if partitioned, must lock to touch nentries and freeList */
- if (IS_PARTITIONED(hctlv))
- SpinLockAcquire(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockAcquire(&hctl->mutex);
- Assert(hctlv->nentries > 0);
- hctlv->nentries--;
+ Assert(hctl->nentries > 0);
+ hctl->nentries--;
/* remove record from hash bucket's chain. */
*prevBucketPtr = currBucket->link;
/* add the record to the freelist for this table. */
- currBucket->link = hctlv->freeList;
- hctlv->freeList = currBucket;
+ currBucket->link = hctl->freeList;
+ hctl->freeList = currBucket;
- if (IS_PARTITIONED(hctlv))
- SpinLockRelease(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockRelease(&hctl->mutex);
/*
* better hope the caller is synchronizing access to this
* element, because someone else is going to reuse it the next
* time something is added to the table
*/
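In the HASH_REMOVE case the locking change is the interesting part, but the unlink itself deserves a note: prevBucketPtr points either at the bucket slot or at the previous element's link field, so the single assignment *prevBucketPtr = currBucket->link covers both "first in chain" and "middle of chain". A self-contained sketch of that idiom with hypothetical names (the caller is assumed to hold whatever lock protects the chain, as dynahash does):

    #include <stddef.h>

    typedef struct Elem
    {
        struct Elem *link;
        int         key;
    } Elem;

    /* Remove the first element with the given key from a bucket chain and
     * return it, or NULL if it is not there. */
    static Elem *
    bucket_remove(Elem **bucket, int key)
    {
        Elem      **prev_link = bucket;     /* where the pointer to cur lives */
        Elem       *cur = *bucket;

        while (cur != NULL && cur->key != key)
        {
            prev_link = &cur->link;
            cur = cur->link;
        }
        if (cur != NULL)
            *prev_link = cur->link;         /* splice cur out of the chain */
        return cur;
    }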
static HASHBUCKET
get_hash_entry(HTAB *hashp)
{
- /* use volatile pointer to prevent code rearrangement */
- volatile HASHHDR *hctlv = hashp->hctl;
+ HASHHDR *hctl = hashp->hctl;
HASHBUCKET newElement;
for (;;)
{
/* if partitioned, must lock to touch nentries and freeList */
- if (IS_PARTITIONED(hctlv))
- SpinLockAcquire(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockAcquire(&hctl->mutex);
/* try to get an entry from the freelist */
- newElement = hctlv->freeList;
+ newElement = hctl->freeList;
if (newElement != NULL)
break;
/* no free elements. allocate another chunk of buckets */
- if (IS_PARTITIONED(hctlv))
- SpinLockRelease(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockRelease(&hctl->mutex);
- if (!element_alloc(hashp, hctlv->nelem_alloc))
+ if (!element_alloc(hashp, hctl->nelem_alloc))
{
/* out of memory */
return NULL;
}
/* remove entry from freelist, bump nentries */
- hctlv->freeList = newElement->link;
- hctlv->nentries++;
+ hctl->freeList = newElement->link;
+ hctl->nentries++;
- if (IS_PARTITIONED(hctlv))
- SpinLockRelease(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockRelease(&hctl->mutex);
return newElement;
}
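One subtlety in get_hash_entry() that is easy to miss when reading the converted code: on success the break leaves the for (;;) loop with the partition mutex still held, so the freelist pop and the nentries bump after the loop run under the same acquisition; the mutex is only dropped and retaken around element_alloc(). The same mutex guards the freelist push in the HASH_REMOVE hunk above. A stripped-down sketch of that control flow, assuming a hypothetical Pool type and a pthread mutex in place of the partition spinlock (try_grow() is a stub here; the real refill, element_alloc(), is in the next hunk):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Node
    {
        struct Node *next;
    } Node;

    typedef struct
    {
        pthread_mutex_t mutex;
        Node       *free_list;
        long        nentries;
    } Pool;

    /* Stand-in for element_alloc(); always fails in this sketch. */
    static bool
    try_grow(Pool *pool)
    {
        (void) pool;
        return false;
    }

    static Node *
    pool_get(Pool *pool)
    {
        Node       *n;

        for (;;)
        {
            pthread_mutex_lock(&pool->mutex);

            /* try to take an entry from the freelist */
            n = pool->free_list;
            if (n != NULL)
                break;          /* leave the loop with the mutex still held */

            /* freelist empty: drop the lock while allocating more entries */
            pthread_mutex_unlock(&pool->mutex);
            if (!try_grow(pool))
                return NULL;    /* out of memory */
        }

        /* still under the mutex acquired at the top of the winning iteration */
        pool->free_list = n->next;
        pool->nentries++;

        pthread_mutex_unlock(&pool->mutex);
        return n;
    }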
static bool
element_alloc(HTAB *hashp, int nelem)
{
- /* use volatile pointer to prevent code rearrangement */
- volatile HASHHDR *hctlv = hashp->hctl;
+ HASHHDR *hctl = hashp->hctl;
Size elementSize;
HASHELEMENT *firstElement;
HASHELEMENT *tmpElement;
return false;
/* Each element has a HASHELEMENT header plus user data. */
- elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctlv->entrysize);
+ elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
CurrentDynaHashCxt = hashp->hcxt;
firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
}
/* if partitioned, must lock to touch freeList */
- if (IS_PARTITIONED(hctlv))
- SpinLockAcquire(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockAcquire(&hctl->mutex);
/* freelist could be nonempty if two backends did this concurrently */
- firstElement->link = hctlv->freeList;
- hctlv->freeList = prevElement;
+ firstElement->link = hctl->freeList;
+ hctl->freeList = prevElement;
- if (IS_PARTITIONED(hctlv))
- SpinLockRelease(&hctlv->mutex);
+ if (IS_PARTITIONED(hctl))
+ SpinLockRelease(&hctl->mutex);
return true;
}
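The locking in element_alloc() is confined to the final splice: the freshly allocated chunk is chained together privately, with no lock held, because no other backend can see it yet; only attaching the finished chain to the shared freeList needs the mutex, and as the comment says the list may already be non-empty if another backend grew it concurrently. A small sketch of that shape with hypothetical names (malloc in place of hashp->alloc, a bare link field instead of the HASHELEMENT header plus entry data):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct Elem
    {
        struct Elem *link;
    } Elem;

    static pthread_mutex_t freelist_mutex = PTHREAD_MUTEX_INITIALIZER;
    static Elem *free_list;

    static bool
    grow_freelist(int nelem)
    {
        Elem       *chunk;
        Elem       *head = NULL;

        if (nelem <= 0)
            return false;

        /* One allocation for the whole chunk, chained together privately:
         * no lock is needed yet because nothing else can reach these entries. */
        chunk = malloc(nelem * sizeof(Elem));
        if (chunk == NULL)
            return false;

        for (int i = 0; i < nelem; i++)
        {
            chunk[i].link = head;
            head = &chunk[i];
        }

        /* Splice the private chain onto the shared freelist under the mutex;
         * chunk[0] is the tail of the new chain, head is its first element. */
        pthread_mutex_lock(&freelist_mutex);
        chunk[0].link = free_list;
        free_list = head;
        pthread_mutex_unlock(&freelist_mutex);

        return true;
    }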