if (!target) {
if (!(target = malloc(sizeof(struct dev_node))))
- return -ENOMEM;
+ return 0;
memcpy(target, source, sizeof(struct dev_node));
// let's not distort the deltas when a new node is created ...
memcpy(&target->old, &target->new, sizeof(struct dev_data));
node_classify(target);
node_add(info, target);
- return 0;
+ return 1;
}
// remember history from last time around ...
memcpy(&source->old, &target->new, sizeof(struct dev_data));
source->next = target->next;
// finally 'update' the existing node struct ...
memcpy(target, source, sizeof(struct dev_node));
- return 0;
+ return 1;
} // end: node_update
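/* a minimal sketch (hypothetical caller) of the reworked convention:
   node_update() now answers 1 on success and 0 on failure, with errno
   already set to ENOMEM by the malloc which refused us ... */
if (!node_update(info, &node))
    return 1;                      // errno tells the caller why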
*/
if (numitems < 1
|| (void *)items < (void *)(unsigned long)(2 * DISKSTATS_logical_end))
- return -1;
+ return 1;
for (i = 0; i < numitems; i++) {
// a diskstats_item is currently unsigned, but we'll protect our future
if (items[i] < 0)
- return -1;
+ return 1;
if (items[i] >= DISKSTATS_logical_end)
- return -1;
+ return 1;
}
return 0;
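/* a hedged sketch of the misuse this low-address guard protects against:
   an item enum passed by value where the items pointer belongs would
   arrive as a tiny address, below 2 * DISKSTATS_logical_end (both calls
   below are illustrative, assuming the usual 'select' signature) ...
       procps_diskstats_select(info, name, DISKSTATS_READS, 1);   // wrong
       procps_diskstats_select(info, name, &item, 1);             // right */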
* Read the data out of /proc/diskstats putting the information
* into the supplied info structure
*
- * Returns: 0 on success, negative on error
+ * Returns: 0 on success, 1 on error
*/
static int diskstats_read_failed (
struct diskstats_info *info)
if (!info->diskstats_fp
&& (!(info->diskstats_fp = fopen(DISKSTATS_FILE, "r"))))
- return -errno;
+ return 1;
if (fseek(info->diskstats_fp, 0L, SEEK_SET) == -1)
- return -errno;
+ return 1;
info->old_stamp = info->new_stamp;
info->new_stamp = time(NULL);
, &node.new.io_wtime);
if (rc != 14) {
- if (errno != 0)
- return -errno;
- return -EIO;
+ errno = ERANGE;
+ return 1;
}
node.stamped = info->new_stamp;
- if ((rc = node_update(info, &node)))
- return rc;
+ if (!node_update(info, &node))
+ return 1; // here, errno was set to ENOMEM
}
return 0;
}
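/* a minimal sketch (hypothetical caller): any nonzero answer from one of
   these *_read_failed() helpers now means errno already explains what
   went wrong ... */
if (diskstats_read_failed(info))
    return -errno;                 // ENOMEM, ERANGE or an fopen/fseek errno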
if (!info->fetch_ext.extents) {
if (!(ext = diskstats_stacks_alloc(&info->fetch_ext, n_alloc)))
- return -ENOMEM;
+ return -1; // here, errno was set to ENOMEM
memset(info->fetch.anchor, 0, sizeof(void *) * n_alloc);
memcpy(info->fetch.anchor, ext->stacks, sizeof(void *) * n_alloc);
diskstats_itemize_stacks_all(&info->fetch_ext);
n_alloc += STACKS_INCR;
if ((!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc)))
|| (!(ext = diskstats_stacks_alloc(&info->fetch_ext, STACKS_INCR))))
- return -1;
+ return -1; // here, errno was set to ENOMEM
memcpy(info->fetch.anchor + n_inuse, ext->stacks, sizeof(void *) * STACKS_INCR);
}
diskstats_assign_results(info->fetch.anchor[n_inuse], node);
if (n_saved < n_inuse + 1) {
n_saved = n_inuse + 1;
if (!(info->fetch.results.stacks = realloc(info->fetch.results.stacks, sizeof(void *) * n_saved)))
- return -ENOMEM;
+ return -1;
}
memcpy(info->fetch.results.stacks, info->fetch.anchor, sizeof(void *) * n_inuse);
info->fetch.results.stacks[n_inuse] = NULL;
int numitems)
{
if (diskstats_items_check_failed(this, items, numitems))
- return -EINVAL;
+ return -1;
/* is this the first time or have things changed since we were last called?
if so, gotta' redo all of our stacks stuff ... */
if (this->numitems != numitems + 1
|| memcmp(this->items, items, sizeof(enum diskstats_item) * numitems)) {
// allow for our DISKSTATS_logical_end
if (!(this->items = realloc(this->items, sizeof(enum diskstats_item) * (numitems + 1))))
- return -ENOMEM;
+ return -1; // here, errno was set to ENOMEM
memcpy(this->items, items, sizeof(enum diskstats_item) * numitems);
this->items[numitems] = DISKSTATS_logical_end;
this->numitems = numitems + 1;
struct diskstats_info **info)
{
struct diskstats_info *p;
- int rc;
if (info == NULL)
return -EINVAL;
-
if (!(p = calloc(1, sizeof(struct diskstats_info))))
return -ENOMEM;
1) ensure there will be no problems with subsequent access |
2) make delta results potentially useful, even if 1st time |
3) eliminate need for history distortions 1st time 'switch' | */

- if ((rc = diskstats_read_failed(p))) {
+ if (diskstats_read_failed(p)) {
procps_diskstats_unref(&p);
- return rc;
+ return -errno;
}
*info = p;
(*info)->refcount--;
if ((*info)->refcount < 1) {
+ int errno_sav = errno;
+
if ((*info)->diskstats_fp) {
fclose((*info)->diskstats_fp);
(*info)->diskstats_fp = NULL;
free(*info);
*info = NULL;
+
+ errno = errno_sav;
return 0;
}
return (*info)->refcount;
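/* why that errno save/restore matters, in a sketch: unref is often called
   on an error path, and its fclose/free cleanup must not clobber the errno
   a caller is about to report (hypothetical caller) ... */
if (diskstats_read_failed(p)) {
    procps_diskstats_unref(&p);    // cleanup leaves errno untouched
    return -errno;                 // still the read failure, not fclose's
}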
struct dev_node *node;
time_t cur_secs;
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (item < 0 || item >= DISKSTATS_logical_end)
return NULL;
+ errno = 0;
/* we will NOT read the diskstat file with every call - rather, we'll offer
a granularity of 1 second between reads ... */
}
info->get_this.item = item;
-// with 'get', we must NOT honor the usual 'noop' guarantee
-// if (item > DISKSTATS_noop)
- info->get_this.result.ul_int = 0;
+ // with 'get', we must NOT honor the usual 'noop' guarantee
+ info->get_this.result.ul_int = 0;
- if (!(node = node_get(info, name)))
+ if (!(node = node_get(info, name))) {
+ errno = ENXIO;
return NULL;
+ }
Item_table[item].setsfunc(&info->get_this, node);
return &info->get_this;
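/* a hedged usage sketch of the errno protocol these NULL returning entry
   points now share: EINVAL is pre-set for argument gaffes, cleared once
   validation passes, and ENXIO marks an unknown name (the disk name and
   item below are illustrative) ... */
struct diskstats_result *res;
if (!(res = procps_diskstats_get(info, "sda", DISKSTATS_READS))
&& (errno == ENXIO))
    fprintf(stderr, "no such disk\n");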
enum diskstats_item *items,
int numitems)
{
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
-
if (0 > diskstats_stacks_reconfig_maybe(&info->fetch_ext, items, numitems))
- return NULL;
+ return NULL; // here, errno may be overridden with ENOMEM
+ errno = 0;
if (info->fetch_ext.dirty_stacks)
diskstats_cleanup_stacks_all(&info->fetch_ext);
if (diskstats_read_failed(info))
return NULL;
- diskstats_stacks_fetch(info);
+ if (0 > diskstats_stacks_fetch(info))
+ return NULL;
info->fetch_ext.dirty_stacks = 1;
return &info->fetch.results;
{
struct dev_node *node;
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
-
if (0 > diskstats_stacks_reconfig_maybe(&info->select_ext, items, numitems))
- return NULL;
+ return NULL; // here, errno may be overridden with ENOMEM
+ errno = 0;
if (!info->select_ext.extents
&& (!diskstats_stacks_alloc(&info->select_ext, 1)))
if (diskstats_read_failed(info))
return NULL;
- if (!(node = node_get(info, name)))
+ if (!(node = node_get(info, name))) {
+ errno = ENXIO;
return NULL;
+ }
diskstats_assign_results(info->select_ext.extents->stacks[0], node);
info->select_ext.dirty_stacks = 1;
struct sort_parms parms;
int offset;
+ errno = EINVAL;
if (info == NULL || stacks == NULL)
return NULL;
-
// a diskstats_item is currently unsigned, but we'll protect our future
if (sortitem < 0 || sortitem >= DISKSTATS_logical_end)
return NULL;
return NULL;
++p;
}
+ errno = 0;
+
parms.offset = offset;
parms.order = order;
qsort_r(stacks, numstacked, sizeof(void *), (QSR_t)Item_table[p->item].sortfunc, &parms);
return stacks;
} // end: procps_diskstats_sort
*/
if (numitems < 1
|| (void *)items < (void *)(unsigned long)(2 * MEMINFO_logical_end))
- return -1;
+ return 1;
for (i = 0; i < numitems; i++) {
// a meminfo_item is currently unsigned, but we'll protect our future
if (items[i] < 0)
- return -1;
+ return 1;
if (items[i] >= MEMINFO_logical_end)
- return -1;
+ return 1;
}
return 0;
struct meminfo_info *info)
{
#define htVAL(f) e.key = STRINGIFY(f) ":"; e.data = &info->hist.new. f; \
- if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return -errno;
+ if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return 1;
#define htXTRA(k,f) e.key = STRINGIFY(k) ":"; e.data = &info->hist.new. f; \
- if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return -errno;
+ if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return 1;
ENTRY e, *ep;
size_t n;
// will also include those derived fields (more is better)
n = sizeof(struct meminfo_data) / sizeof(unsigned long);
// we'll follow the hsearch recommendation of an extra 25%
- hcreate_r(n + (n / 4), &info->hashtab);
+ if (!hcreate_r(n + (n / 4), &info->hashtab))
+ return 1;
htVAL(Active)
htXTRA(Active(anon), Active_anon)
if (-1 == info->meminfo_fd
&& (info->meminfo_fd = open(MEMINFO_FILE, O_RDONLY)) == -1)
- return -errno;
+ return 1;
if (lseek(info->meminfo_fd, 0L, SEEK_SET) == -1)
- return -errno;
+ return 1;
for (;;) {
if ((size = read(info->meminfo_fd, buf, sizeof(buf)-1)) < 0) {
if (errno == EINTR || errno == EAGAIN)
continue;
- return -errno;
+ return 1;
}
break;
}
- if (size == 0)
- return -1;
+ if (size == 0) {
+ errno = EIO;
+ return 1;
+ }
buf[size] = '\0';
head = buf;
struct meminfo_info **info)
{
struct meminfo_info *p;
- int rc;
if (info == NULL || *info != NULL)
return -EINVAL;
p->refcount = 1;
p->meminfo_fd = -1;
- if ((rc = meminfo_make_hash_failed(p))) {
+ if (meminfo_make_hash_failed(p)) {
free(p);
- return rc;
+ return -errno;
}
/* do a priming read here for the following potential benefits: |
1) ensure there will be no problems with subsequent access |
2) make delta results potentially useful, even if 1st time |
3) eliminate need for history distortions 1st time 'switch' | */
- if ((rc = meminfo_read_failed(p))) {
+ if (meminfo_read_failed(p)) {
procps_meminfo_unref(&p);
- return rc;
+ return -errno;
}
*info = p;
(*info)->refcount--;
if ((*info)->refcount < 1) {
+ int errno_sav = errno;
+
if ((*info)->extents)
meminfo_extents_free_all((*info));
if ((*info)->items)
free(*info);
*info = NULL;
+
+ errno = errno_sav;
return 0;
}
return (*info)->refcount;
static time_t sav_secs;
time_t cur_secs;
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (item < 0 || item >= MEMINFO_logical_end)
return NULL;
+ errno = 0;
/* we will NOT read the meminfo file with every call - rather, we'll offer
a granularity of 1 second between reads ... */
}
info->get_this.item = item;
-// with 'get', we must NOT honor the usual 'noop' guarantee
-// if (item > MEMINFO_noop)
- info->get_this.result.ul_int = 0;
+ // with 'get', we must NOT honor the usual 'noop' guarantee
+ info->get_this.result.ul_int = 0;
Item_table[item].setsfunc(&info->get_this, &info->hist);
return &info->get_this;
enum meminfo_item *items,
int numitems)
{
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
if (meminfo_items_check_failed(numitems, items))
return NULL;
+ errno = 0;
/* is this the first time or have things changed since we were last called?
if so, gotta' redo all of our stacks stuff ... */
PROCTAB *get_PT; // oldlib interface for active 'get'
struct stacks_extent *get_ext; // an extent used for active 'get'
enum pids_fetch_type get_type; // last known type of 'get' request
+ int seterr; // an ENOMEM encountered during assign
};
tot = strlen(src) + 1; // prep for our vectors
adj = (pSZ-1) - ((tot + pSZ-1) & (pSZ-1)); // calc alignment bytes
cpy = calloc(1, tot + adj + (2 * pSZ)); // get new larger buffer
- if (!cpy) return NULL; // we no longer use xcalloc
+ if (!cpy) return NULL; // oops, looks like ENOMEM
snprintf(cpy, tot, "%s", src); // duplicate their string
vec = (char**)(cpy + tot + adj); // prep pointer to pointers
*vec = cpy; // point 1st vector to string
R->result. t = (long)(P-> x) << I -> pgs2k_shift; }
/* strdup of a static char array */
#define DUP_set(e,x) setDECL(e) { \
- (void)I; R->result.str = strdup(P-> x); }
+ if (!(R->result.str = strdup(P-> x))) I->seterr = 1; }
/* regular assignment copy */
#define REG_set(e,t,x) setDECL(e) { \
(void)I; R->result. t = P-> x; }
/* take ownership of a normal single string if possible, else return
some sort of hint that they duplicated this char * item ... */
#define STR_set(e,x) setDECL(e) { \
- (void)I; if (NULL != P-> x) { R->result.str = P-> x; P-> x = NULL; } \
- else R->result.str = strdup("[ duplicate " STRINGIFY(e) " ]"); }
+ if (NULL != P-> x) { R->result.str = P-> x; P-> x = NULL; } \
+ else { R->result.str = strdup("[ duplicate " STRINGIFY(e) " ]"); \
+ if (!R->result.str) I->seterr = 1; } }
/* take ownership of true vectorized strings if possible, else return
some sort of hint that they duplicated this char ** item ... */
#define VEC_set(e,x) setDECL(e) { \
- (void)I; if (NULL != P-> x) { R->result.strv = P-> x; P-> x = NULL; } \
- else R->result.strv = pids_vectorize_this("[ duplicate " STRINGIFY(e) " ]"); }
+ if (NULL != P-> x) { R->result.strv = P-> x; P-> x = NULL; } \
+ else { R->result.strv = pids_vectorize_this("[ duplicate " STRINGIFY(e) " ]"); \
+ if (!R->result.strv) I->seterr = 1; } }
setDECL(noop) { (void)I; (void)R; (void)P; return; }
setDECL(TIME_ELAPSED) { unsigned long long t = P->start_time / I->hertz; R->result.ull_int = I->boot_seconds >= t ? (I->boot_seconds - t) : 0; }
REG_set(TIME_START, ull_int, start_time)
REG_set(TTY, s_int, tty)
-setDECL(TTY_NAME) { char buf[64]; (void)I; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV); R->result.str = strdup(buf); }
-setDECL(TTY_NUMBER) { char buf[64]; (void)I; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV|ABBREV_TTY|ABBREV_PTS); R->result.str = strdup(buf); }
+setDECL(TTY_NAME) { char buf[64]; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV); if (!(R->result.str = strdup(buf))) I->seterr = 1; }
+setDECL(TTY_NUMBER) { char buf[64]; dev_to_tty(buf, sizeof(buf), P->tty, P->tid, ABBREV_DEV|ABBREV_TTY|ABBREV_PTS); if (!(R->result.str = strdup(buf))) I->seterr = 1; }
REG_set(VM_DATA, ul_int, vm_data)
REG_set(VM_EXE, ul_int, vm_exe)
REG_set(VM_LIB, ul_int, vm_lib)
REG_set(VM_SWAP, ul_int, vm_swap)
setDECL(VM_USED) { (void)I; R->result.ul_int = P->vm_swap + P->vm_rss; }
REG_set(VSIZE_PGS, ul_int, vsize)
-setDECL(WCHAN_NAME) { (void)I; R->result.str = strdup(lookup_wchan(P->tid)); }
+setDECL(WCHAN_NAME) { if (!(R->result.str = strdup(lookup_wchan(P->tid)))) I->seterr = 1; }
#undef setDECL
#undef CVT_set
Hr(PHist_sav) = realloc(Hr(PHist_sav), sizeof(HST_t) * Hr(HHist_siz));
Hr(PHist_new) = realloc(Hr(PHist_new), sizeof(HST_t) * Hr(HHist_siz));
if (!Hr(PHist_sav) || !Hr(PHist_new))
- return -ENOMEM;
+ return 0;
}
Hr(PHist_new[nSLOT].pid) = p->tid;
Hr(PHist_new[nSLOT].maj) = p->maj_flt;
}
nSLOT++;
- return 0;
+ return 1;
#undef nSLOT
} // end: pids_make_hist
// ___ Standard Private Functions |||||||||||||||||||||||||||||||||||||||||||||
-static inline void pids_assign_results (
+static inline int pids_assign_results (
struct pids_info *info,
struct pids_stack *stack,
proc_t *p)
{
struct pids_result *this = stack->head;
+ info->seterr = 0;
for (;;) {
enum pids_item item = this->item;
if (item >= PIDS_logical_end)
Item_table[item].setsfunc(info, this, p);
++this;
}
- return;
+ return !info->seterr;
} // end: pids_assign_results
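/* the seterr round trip in brief (sketch): the DUP/STR/VEC/TTY style set
   routines above record any failed strdup in info->seterr, and this new
   boolean result lets callers translate that into an ENOMEM failure ... */
if (!pids_assign_results(info, stack, &task))
    return -1;                     // errno is ENOMEM from some strdup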
*/
if (numitems < 1
|| (void *)items < (void *)0x8000) // twice as big as our largest enum
- return -1;
+ return 1;
+
for (i = 0; i < numitems; i++) {
// a pids_item is currently unsigned, but we'll protect our future
if (items[i] < 0)
- return -1;
+ return 1;
if (items[i] >= PIDS_logical_end) {
- return -1;
+ return 1;
}
}
return 0;
PROCTAB **this)
{
if (*this != NULL) {
+ int errsav = errno;
closeproc(*this);
*this = NULL;
+ errno = errsav;
}
} // end: pids_oldproc_close
++counts->total;
if (info->history_yes)
- return !pids_make_hist(info, p);
+ return pids_make_hist(info, p);
return 1;
} // end: pids_proc_tally
// initialize stuff -----------------------------------
if (!info->fetch.anchor) {
if (!(info->fetch.anchor = calloc(sizeof(void *), STACKS_INCR)))
- return -ENOMEM;
+ return -1;
n_alloc = STACKS_INCR;
}
if (!info->extents) {
if (!(ext = pids_stacks_alloc(info, n_alloc)))
- return -ENOMEM;
+ return -1; // here, errno was set to ENOMEM
memset(info->fetch.anchor, 0, sizeof(void *) * n_alloc);
memcpy(info->fetch.anchor, ext->stacks, sizeof(void *) * n_alloc);
}
while (info->read_something(info->fetch_PT, &task)) {
if (!(n_inuse < n_alloc)) {
n_alloc += STACKS_INCR;
- if ((!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc)))
+ if (!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc))
|| (!(ext = pids_stacks_alloc(info, STACKS_INCR))))
- return -1;
+ return -1; // here, errno was set to ENOMEM
memcpy(info->fetch.anchor + n_inuse, ext->stacks, sizeof(void *) * STACKS_INCR);
}
if (!pids_proc_tally(info, &info->fetch.counts, &task))
- return -1;
- pids_assign_results(info, info->fetch.anchor[n_inuse++], &task);
+ return -1; // here, errno was set to ENOMEM
+ if (!pids_assign_results(info, info->fetch.anchor[n_inuse++], &task))
+ return -1; // here, errno was set to ENOMEM
}
+ /* while the possibility is extremely remote, the readproc.c routines
+ behind read_something (simple_readproc and simple_readtask) could have
+ encountered this error, in which case they would have returned NULL,
+ thus ending our while loop above. */
+ if (errno == ENOMEM)
+ return -1;
// finalize stuff -------------------------------------
/* note: we go to this trouble of maintaining a duplicate of the consolidated |
if (n_saved < n_inuse + 1) {
n_saved = n_inuse + 1;
if (!(info->fetch.results.stacks = realloc(info->fetch.results.stacks, sizeof(void *) * n_saved)))
- return -ENOMEM;
+ return -1;
}
memcpy(info->fetch.results.stacks, info->fetch.anchor, sizeof(void *) * n_inuse);
info->fetch.results.stacks[n_inuse] = NULL;
if (info == NULL || *info != NULL)
return -EINVAL;
-
if (!(p = calloc(1, sizeof(struct pids_info))))
return -ENOMEM;
/* this is very likely the *only* newlib function where the
context (pids_info) of NULL will ever be permitted */
- look_up_our_self(&self);
- if (!return_self)
+ if (!look_up_our_self(&self)
+ || (!return_self))
return NULL;
+ errno = EINVAL;
if (info == NULL)
return NULL;
-
/* with items & numitems technically optional at 'new' time, it's
expected 'reset' will have been called -- but just in case ... */
if (!info->curitems)
return NULL;
+ errno = 0;
if (!(ext = pids_stacks_alloc(info, 1)))
return NULL;
- if (!pids_extent_cut(info, ext))
+ if (!pids_extent_cut(info, ext)) {
+ errno = EADDRNOTAVAIL;
return NULL;
-
+ }
ext->next = info->otherexts;
info->otherexts = ext;
- pids_assign_results(info, ext->stacks[0], &self);
+ if (!pids_assign_results(info, ext->stacks[0], &self))
+ return NULL;
return ext->stacks[0];
} // end: fatal_proc_unmounted
{
static proc_t task; // static for initial zeroes + later dynamic free(s)
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (which != PIDS_FETCH_TASKS_ONLY && which != PIDS_FETCH_THREADS_TOO)
fresh_start:
if (!info->get_ext) {
if (!(info->get_ext = pids_stacks_alloc(info, 1)))
- return NULL;
+ return NULL; // here, errno was overridden with ENOMEM
if (!pids_oldproc_open(&info->get_PT, info->oldflags))
- return NULL;
+ return NULL; // here, errno was overridden with ENOMEM/others
info->get_type = which;
info->read_something = which ? readeither : readproc;
}
info->get_ext = NULL;
goto fresh_start;
}
+ errno = 0;
pids_cleanup_stack(info->get_ext->stacks[0]->head);
if (NULL == info->read_something(info->get_PT, &task))
return NULL;
- pids_assign_results(info, info->get_ext->stacks[0], &task);
-
+ if (!pids_assign_results(info, info->get_ext->stacks[0], &task))
+ return NULL;
return info->get_ext->stacks[0];
} // end: procps_pids_get
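/* a hedged iteration sketch: after this rework a NULL from 'get' with
   errno still zero is just the end of the process table, while a nonzero
   errno signals genuine trouble (assuming no intervening errno writes) */
struct pids_stack *stack;
while ((stack = procps_pids_get(info, PIDS_FETCH_TASKS_ONLY)))
    ;                              // consume one process per iteration
if (errno)
    return -errno;                 // ENOMEM or a readproc failure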
{
int rc;
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (which != PIDS_FETCH_TASKS_ONLY && which != PIDS_FETCH_THREADS_TOO)
expected 'reset' will have been called -- but just in case ... */
if (!info->curitems)
return NULL;
+ errno = 0;
if (!pids_oldproc_open(&info->fetch_PT, info->oldflags))
return NULL;
unsigned ids[FILL_ID_MAX + 1];
int rc;
+ errno = EINVAL;
if (info == NULL || these == NULL)
return NULL;
if (numthese < 1 || numthese > FILL_ID_MAX)
expected 'reset' will have been called -- but just in case ... */
if (!info->curitems)
return NULL;
+ errno = 0;
// this zero delimiter is really only needed with PIDS_SELECT_PID
memcpy(ids, these, sizeof(unsigned) * numthese);
pids_oldproc_close(&info->fetch_PT);
// no guarantee any pids/uids were found
- return (rc > -1) ? &info->fetch.results : NULL;
+ return (rc >= 0) ? &info->fetch.results : NULL;
} // end: procps_pids_select
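/* a hedged 'select' sketch: NULL can now mean EINVAL (bad arguments), an
   ENOMEM out of the stacks machinery, or a negative rc while reaping
   (the pid list below is illustrative) ... */
unsigned these[] = { 1, 2 };
if (!procps_pids_select(info, these, 2, PIDS_SELECT_PID))
    return -errno;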
struct pids_result *p;
int offset;
+ errno = EINVAL;
if (info == NULL || stacks == NULL)
return NULL;
// a pids_item is currently unsigned, but we'll protect our future
return NULL;
++p;
}
+ errno = 0;
+
parms.offset = offset;
parms.order = order;
int new_count;
if (info->nodes_used < info->nodes_alloc)
- return 0;
+ return 1;
/* Increment the allocated number of slabs */
new_count = info->nodes_alloc * 5/4+30;
new_nodes = realloc(info->nodes, sizeof(struct slabs_node) * new_count);
if (!new_nodes)
- return -ENOMEM;
+ return 0;
info->nodes = new_nodes;
info->nodes_alloc = new_count;
- return 0;
+ return 1;
} // end: alloc_slabnodes
struct slabinfo_info *info,
struct slabs_node **node)
{
- int retval;
-
if (info->nodes_used == info->nodes_alloc) {
- if ((retval = alloc_slabnodes(info)) < 0)
- return retval;
+ if (!alloc_slabnodes(info))
+ return 0; // here, errno was set to ENOMEM
}
*node = &(info->nodes[info->nodes_used++]);
- return 0;
+ return 1;
} // end: get_slabnode
{
struct slabs_node *node;
char buffer[SLABINFO_LINE_LEN];
- int retval;
int page_size = getpagesize();
struct slabs_summ *slabs = &(info->slabs.new);
if (buffer[0] == '#')
continue;
- if ((retval = get_slabnode(info, &node)) < 0)
- return retval;
+ if (!get_slabnode(info, &node))
+ return 1; // here, errno was set to ENOMEM
if (sscanf(buffer,
"%" STRINGIFY(SLABINFO_NAME_LEN) "s" \
&node->obj_size, &node->objs_per_slab,
&node->pages_per_slab, &node->nr_active_slabs,
&node->nr_slabs) < 8) {
- if (errno != 0)
- return -errno;
- return -EINVAL;
+ errno = ERANGE;
+ return 1;
}
if (!node->name[0])
if (node->obj_size > slabs->max_obj_size)
slabs->max_obj_size = node->obj_size;
- node->cache_size = (unsigned long)node->nr_slabs * node->pages_per_slab
- * page_size;
+ node->cache_size = (unsigned long)node->nr_slabs * node->pages_per_slab * page_size;
if (node->nr_objs) {
node->use = (unsigned int)100 * node->nr_active_objs / node->nr_objs;
* Read the data out of /proc/slabinfo putting the information
* into the supplied info container
*
- * Returns: 0 on success, negative on error
+ * Returns: 0 on success, 1 on error
*/
static int slabinfo_read_failed (
struct slabinfo_info *info)
{
char line[SLABINFO_LINE_LEN];
- int retval, major, minor;
+ int major, minor;
memcpy(&info->slabs.old, &info->slabs.new, sizeof(struct slabs_summ));
memset(&(info->slabs.new), 0, sizeof(struct slabs_summ));
- if ((retval = alloc_slabnodes(info)) < 0)
- return retval;
+ if (!alloc_slabnodes(info))
+ return 1; // here, errno was set to ENOMEM
memset(info->nodes, 0, sizeof(struct slabs_node)*info->nodes_alloc);
info->nodes_used = 0;
if (NULL == info->slabinfo_fp
&& (info->slabinfo_fp = fopen(SLABINFO_FILE, "r")) == NULL)
- return -errno;
+ return 1;
if (fseek(info->slabinfo_fp, 0L, SEEK_SET) < 0)
- return -errno;
+ return 1;
/* Parse the version string */
if (!fgets(line, SLABINFO_LINE_LEN, info->slabinfo_fp))
- return -errno;
-
- if (sscanf(line, "slabinfo - version: %d.%d", &major, &minor) != 2)
- return -EINVAL;
+ return 1;
- if (major == 2)
- retval = parse_slabinfo20(info);
- else
- return -ERANGE;
+ if (2 != sscanf(line, "slabinfo - version: %d.%d", &major, &minor)
+ || (major != 2)) {
+ errno = ERANGE;
+ return 1;
+ }
- return retval;
+ return parse_slabinfo20(info);
} // end: slabinfo_read_failed
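/* a quick check of that version parse against a typical header line (the
   sample text assumes a 2.x format /proc/slabinfo) ... */
int major = 0, minor = 0;
sscanf("slabinfo - version: 2.1", "slabinfo - version: %d.%d", &major, &minor);
// major == 2, minor == 1, so parse_slabinfo20() handles the rest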
*/
if (numitems < 1
|| (void *)items < (void *)(unsigned long)(2 * SLABINFO_logical_end))
- return -1;
+ return 1;
for (i = 0; i < numitems; i++) {
#ifdef ENFORCE_LOGICAL
continue;
if (items[i] < this->lowest
|| (items[i] > this->highest))
- return -1;
+ return 1;
#else
// a slabinfo_item is currently unsigned, but we'll protect our future
if (items[i] < 0)
- return -1;
+ return 1;
if (items[i] >= SLABINFO_logical_end)
- return -1;
+ return 1;
#endif
}
// initialize stuff -----------------------------------
if (!info->fetch.anchor) {
if (!(info->fetch.anchor = calloc(sizeof(void *), STACKS_INCR)))
- return -ENOMEM;
+ return -1;
n_alloc = STACKS_INCR;
}
if (!info->fetch_ext.extents) {
if (!(ext = slabinfo_stacks_alloc(&info->fetch_ext, n_alloc)))
- return -ENOMEM;
+ return -1; // here, errno was set to ENOMEM
memset(info->fetch.anchor, 0, sizeof(void *) * n_alloc);
memcpy(info->fetch.anchor, ext->stacks, sizeof(void *) * n_alloc);
slabinfo_itemize_stacks_all(&info->fetch_ext);
n_alloc += STACKS_INCR;
if ((!(info->fetch.anchor = realloc(info->fetch.anchor, sizeof(void *) * n_alloc)))
|| (!(ext = slabinfo_stacks_alloc(&info->fetch_ext, STACKS_INCR))))
- return -1;
+ return -1; // here, errno was set to ENOMEM
memcpy(info->fetch.anchor + n_inuse, ext->stacks, sizeof(void *) * STACKS_INCR);
}
slabinfo_assign_results(info->fetch.anchor[n_inuse], &info->slabs, &info->nodes[n_inuse]);
if (n_saved < n_inuse + 1) {
n_saved = n_inuse + 1;
if (!(info->fetch.results.stacks = realloc(info->fetch.results.stacks, sizeof(void *) * n_saved)))
- return -ENOMEM;
+ return -1;
}
memcpy(info->fetch.results.stacks, info->fetch.anchor, sizeof(void *) * n_inuse);
info->fetch.results.stacks[n_inuse] = NULL;
int numitems)
{
if (slabinfo_items_check_failed(this, items, numitems))
- return -EINVAL;
+ return -1;
/* is this the first time or have things changed since we were last called?
if so, gotta' redo all of our stacks stuff ... */
if (this->numitems != numitems + 1
|| memcmp(this->items, items, sizeof(enum slabinfo_item) * numitems)) {
// allow for our SLABINFO_logical_end
if (!(this->items = realloc(this->items, sizeof(enum slabinfo_item) * (numitems + 1))))
- return -ENOMEM;
+ return -1;
memcpy(this->items, items, sizeof(enum slabinfo_item) * numitems);
this->items[numitems] = SLABINFO_logical_end;
this->numitems = numitems + 1;
struct slabinfo_info **info)
{
struct slabinfo_info *p;
- int rc;
- if (info == NULL)
+ if (info == NULL || *info != NULL)
return -EINVAL;
-
if (!(p = calloc(1, sizeof(struct slabinfo_info))))
return -ENOMEM;
1) see if that caller's permissions were sufficient (root) |
2) make delta results potentially useful, even if 1st time |
3) eliminate need for history distortions 1st time 'switch' | */
- if ((rc = slabinfo_read_failed(p))) {
+ if (slabinfo_read_failed(p)) {
procps_slabinfo_unref(&p);
- return rc;
+ return -errno;
}
*info = p;
(*info)->refcount--;
if ((*info)->refcount < 1) {
+ int errno_sav = errno;
+
if ((*info)->slabinfo_fp) {
fclose((*info)->slabinfo_fp);
(*info)->slabinfo_fp = NULL;
free(*info);
*info = NULL;
+
+ errno = errno_sav;
return 0;
}
return (*info)->refcount;
static time_t sav_secs;
time_t cur_secs;
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (item < 0 || item >= SLABINFO_logical_end)
return NULL;
+ errno = 0;
/* we will NOT read the slabinfo file with every call - rather, we'll offer
a granularity of 1 second between reads ... */
}
info->get_this.item = item;
-// with 'get', we must NOT honor the usual 'noop' guarantee
-// if (item > SLABINFO_noop)
- info->get_this.result.ul_int = 0;
+ // with 'get', we must NOT honor the usual 'noop' guarantee
+ info->get_this.result.ul_int = 0;
Item_table[item].setsfunc(&info->get_this, &info->slabs, &info->nul_node);
return &info->get_this;
enum slabinfo_item *items,
int numitems)
{
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
-
if (0 > slabinfo_stacks_reconfig_maybe(&info->fetch_ext, items, numitems))
- return NULL;
+ return NULL; // here, errno may be overridden with ENOMEM
+ errno = 0;
if (info->fetch_ext.dirty_stacks)
slabinfo_cleanup_stacks_all(&info->fetch_ext);
if (slabinfo_read_failed(info))
return NULL;
- slabinfo_stacks_fetch(info);
+ if (0 > slabinfo_stacks_fetch(info))
+ return NULL;
info->fetch_ext.dirty_stacks = 1;
return &info->fetch.results;
enum slabinfo_item *items,
int numitems)
{
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
-
if (0 > slabinfo_stacks_reconfig_maybe(&info->select_ext, items, numitems))
- return NULL;
+ return NULL; // here, errno may be overridden with ENOMEM
+ errno = 0;
if (!info->select_ext.extents
&& (!slabinfo_stacks_alloc(&info->select_ext, 1)))
struct sort_parms parms;
int offset;
+ errno = EINVAL;
if (info == NULL || stacks == NULL)
return NULL;
-
// a slabinfo_item is currently unsigned, but we'll protect our future
if (sortitem < 0 || sortitem >= SLABINFO_logical_end)
return NULL;
return NULL;
++p;
}
+ errno = 0;
+
parms.offset = offset;
parms.order = order;
*/
if (numitems < 1
|| (void *)items < (void *)(unsigned long)(2 * STAT_logical_end))
- return -1;
+ return 1;
for (i = 0; i < numitems; i++) {
// a stat_item is currently unsigned, but we'll protect our future
if (items[i] < 0)
- return -1;
+ return 1;
if (items[i] >= STAT_logical_end) {
- return -1;
+ return 1;
}
}
return 0;
if (!info->cpus.hist.n_alloc) {
info->cpus.hist.tics = calloc(NEWOLD_INCR, sizeof(struct hist_tic));
if (!(info->cpus.hist.tics))
- return -ENOMEM;
+ return 1;
info->cpus.hist.n_alloc = NEWOLD_INCR;
info->cpus.hist.n_inuse = 0;
}
if (!info->stat_fp
&& (!(info->stat_fp = fopen(STAT_FILE, "r"))))
- return -errno;
+ return 1;
fflush(info->stat_fp);
rewind(info->stat_fp);
especially in a massively parallel environment. additionally, each cpu |
line is then frozen in time rather than changing until we get around to |
accessing it. this helps to minimize (not eliminate) some distortions. | */
- tot_read = errno = 0;
+ tot_read = 0;
while ((0 < (num = fread(curPOS, 1, curSIZ, info->stat_fp)))) {
tot_read += num;
if (tot_read < maxSIZ)
break;
maxSIZ += BUFFER_INCR;
if (!(info->stat_buf = realloc(info->stat_buf, maxSIZ)))
- return -ENOMEM;
+ return 1;
};
#undef maxSIZ
#undef curSIZ
#undef curPOS
- if (!feof(info->stat_fp))
- return -errno;
+ if (!feof(info->stat_fp)) {
+ errno = EIO;
+ return 1;
+ }
info->stat_buf[tot_read] = '\0';
bp = info->stat_buf;
, &sum_ptr->new.user, &sum_ptr->new.nice, &sum_ptr->new.system
, &sum_ptr->new.idle, &sum_ptr->new.iowait, &sum_ptr->new.irq
, &sum_ptr->new.sirq, &sum_ptr->new.stolen
- , &sum_ptr->new.guest, &sum_ptr->new.gnice))
- return -1;
+ , &sum_ptr->new.guest, &sum_ptr->new.gnice)) {
+ errno = ERANGE;
+ return 1;
+ }
stat_derive_unique(sum_ptr);
i = 0;
info->cpus.hist.n_alloc += NEWOLD_INCR;
info->cpus.hist.tics = realloc(info->cpus.hist.tics, info->cpus.hist.n_alloc * sizeof(struct hist_tic));
if (!(info->cpus.hist.tics))
- return -ENOMEM;
+ return 1;
goto reap_em_again;
}
struct stacks_extent *ext;
int i;
- if (this == NULL)
- return -EINVAL;
-
// initialize stuff -----------------------------------
if (!this->anchor) {
if (!(this->anchor = calloc(sizeof(void *), STACKS_INCR)))
- return -ENOMEM;
+ return -1;
n_alloc = STACKS_INCR;
}
if (!this->fetch.extents) {
if (!(ext = stat_stacks_alloc(&this->fetch, n_alloc)))
- return -ENOMEM;
+ return -1; // here, errno was set to ENOMEM
memcpy(this->anchor, ext->stacks, sizeof(void *) * n_alloc);
}
if (this->fetch.dirty_stacks)
if (!(i < n_alloc)) {
n_alloc += STACKS_INCR;
if ((!(this->anchor = realloc(this->anchor, sizeof(void *) * n_alloc)))
- || (!(ext = stat_stacks_alloc(&this->fetch, STACKS_INCR)))) {
- return -ENOMEM;
- }
+ || (!(ext = stat_stacks_alloc(&this->fetch, STACKS_INCR))))
+ return -1; // here, errno was set to ENOMEM
memcpy(this->anchor + i, ext->stacks, sizeof(void *) * STACKS_INCR);
}
stat_assign_results(this->anchor[i], &info->sys_hist, &this->hist.tics[i]);
if (n_saved < i + 1) {
n_saved = i + 1;
if (!(this->result.stacks = realloc(this->result.stacks, sizeof(void *) * n_saved)))
- return -ENOMEM;
+ return -1;
}
memcpy(this->result.stacks, this->anchor, sizeof(void *) * i);
this->result.stacks[i] = NULL;
int numitems)
{
if (stat_items_check_failed(numitems, items))
- return -EINVAL;
-
+ return -1;
/* is this the first time or have things changed since we were last called?
if so, gotta' redo all of our stacks stuff ... */
if (this->items->num != numitems + 1
|| memcmp(this->items->enums, items, sizeof(enum stat_item) * numitems)) {
// allow for our STAT_logical_end
if (!(this->items->enums = realloc(this->items->enums, sizeof(enum stat_item) * (numitems + 1))))
- return -ENOMEM;
+ return -1;
memcpy(this->items->enums, items, sizeof(enum stat_item) * numitems);
this->items->enums[numitems] = STAT_logical_end;
this->items->num = numitems + 1;
struct stat_info **info)
{
struct stat_info *p;
- int rc;
if (info == NULL || *info != NULL)
return -EINVAL;
1) ensure there will be no problems with subsequent access |
2) make delta results potentially useful, even if 1st time |
3) eliminate need for history distortions 1st time 'switch' | */
- if ((rc = stat_read_failed(p))) {
+ if (stat_read_failed(p)) {
procps_stat_unref(&p);
- return rc;
+ return -errno;
}
*info = p;
(*info)->refcount--;
if ((*info)->refcount < 1) {
+ int errno_sav = errno;
+
if ((*info)->stat_fp)
fclose((*info)->stat_fp);
if ((*info)->stat_buf)
free(*info);
*info = NULL;
+
+ errno = errno_sav;
return 0;
}
return (*info)->refcount;
static time_t sav_secs;
time_t cur_secs;
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (item < 0 || item >= STAT_logical_end)
return NULL;
+ errno = 0;
/* we will NOT read the source file with every call - rather, we'll offer
a granularity of 1 second between reads ... */
}
info->get_this.item = item;
-// with 'get', we must NOT honor the usual 'noop' guarantee
-// if (item > STAT_noop)
- info->get_this.result.ull_int = 0;
+ // with 'get', we must NOT honor the usual 'noop' guarantee
+ info->get_this.result.ull_int = 0;
Item_table[item].setsfunc(&info->get_this, &info->sys_hist, &info->cpu_hist);
return &info->get_this;
{
int rc;
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
if (what != STAT_REAP_CPUS_ONLY && what != STAT_REAP_CPUS_AND_NODES)
}
}
#endif
-
if (0 > (rc = stat_stacks_reconfig_maybe(&info->cpu_summary, items, numitems)))
- return NULL;
+ return NULL; // here, errno may be overridden with ENOMEM
if (rc) {
stat_extents_free_all(&info->cpus.fetch);
stat_extents_free_all(&info->nodes.fetch);
}
+ errno = 0;
if (stat_read_failed(info))
return NULL;
switch (what) {
case STAT_REAP_CPUS_ONLY:
- if (!stat_stacks_fetch(info, &info->cpus))
+ if (0 > stat_stacks_fetch(info, &info->cpus))
return NULL;
break;
case STAT_REAP_CPUS_AND_NODES:
will have marked (temporarily) all the cpu node ids as invalid | */
if (0 > stat_make_numa_hist(info))
return NULL;
- // tolerate an unexpected absence of libnuma.so ...
- stat_stacks_fetch(info, &info->nodes);
- if (!stat_stacks_fetch(info, &info->cpus))
+ if (0 > stat_stacks_fetch(info, &info->nodes))
+ return NULL;
+ if (0 > stat_stacks_fetch(info, &info->cpus))
return NULL;
break;
default:
enum stat_item *items,
int numitems)
{
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
-
if (0 > stat_stacks_reconfig_maybe(&info->select, items, numitems))
- return NULL;
+ return NULL; // here, errno may be overridden with ENOMEM
+ errno = 0;
if (stat_read_failed(info))
return NULL;
struct sort_parms parms;
int offset;
+ errno = EINVAL;
if (info == NULL || stacks == NULL)
return NULL;
-
// a stat_item is currently unsigned, but we'll protect our future
if (sortitem < 0 || sortitem >= STAT_logical_end)
return NULL;
return NULL;
++p;
}
+ errno = 0;
+
parms.offset = offset;
parms.order = order;
*/
if (numitems < 1
|| (void *)items < (void *)(unsigned long)(2 * VMSTAT_logical_end))
- return -1;
+ return 1;
for (i = 0; i < numitems; i++) {
// a vmstat_item is currently unsigned, but we'll protect our future
if (items[i] < 0)
- return -1;
+ return 1;
if (items[i] >= VMSTAT_logical_end)
- return -1;
+ return 1;
}
return 0;
struct vmstat_info *info)
{
#define htVAL(f) e.key = STRINGIFY(f); e.data = &info->hist.new. f; \
- if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return -errno;
+ if (!hsearch_r(e, ENTER, &ep, &info->hashtab)) return 1;
ENTRY e, *ep;
size_t n;
n = sizeof(struct vmstat_data) / sizeof(unsigned long);
// we'll follow the hsearch recommendation of an extra 25%
- hcreate_r(n + (n / 4), &info->hashtab);
+ if (!hcreate_r(n + (n / 4), &info->hashtab))
+ return 1;
htVAL(allocstall)
htVAL(balloon_deflate)
if (-1 == info->vmstat_fd
&& (info->vmstat_fd = open(VMSTAT_FILE, O_RDONLY)) == -1)
- return -errno;
+ return 1;
if (lseek(info->vmstat_fd, 0L, SEEK_SET) == -1)
- return -errno;
+ return 1;
for (;;) {
if ((size = read(info->vmstat_fd, buf, sizeof(buf)-1)) < 0) {
if (errno == EINTR || errno == EAGAIN)
continue;
- return -errno;
+ return 1;
}
break;
}
- if (size == 0)
- return -1;
+ if (size == 0) {
+ errno = EIO;
+ return 1;
+ }
buf[size] = '\0';
head = buf;
struct vmstat_info **info)
{
struct vmstat_info *p;
- int rc;
if (info == NULL || *info != NULL)
return -EINVAL;
p->refcount = 1;
p->vmstat_fd = -1;
- if ((rc = vmstat_make_hash_failed(p))) {
+ if (vmstat_make_hash_failed(p)) {
free(p);
- return rc;
+ return -errno;
}
/* do a priming read here for the following potential benefits: |
1) ensure there will be no problems with subsequent access |
2) make delta results potentially useful, even if 1st time |
3) eliminate need for history distortions 1st time 'switch' | */
- if ((rc = vmstat_read_failed(p))) {
+ if (vmstat_read_failed(p)) {
procps_vmstat_unref(&p);
- return rc;
+ return -errno;
}
*info = p;
(*info)->refcount--;
if ((*info)->refcount < 1) {
+ int errno_sav = errno;
+
if ((*info)->extents)
vmstat_extents_free_all((*info));
if ((*info)->items)
free(*info);
*info = NULL;
+
+ errno = errno_sav;
return 0;
}
return (*info)->refcount;
static time_t sav_secs;
time_t cur_secs;
+ errno = EINVAL;
if (info == NULL)
return NULL;
if (item < 0 || item >= VMSTAT_logical_end)
return NULL;
+ errno = 0;
/* we will NOT read the vmstat file with every call - rather, we'll offer
a granularity of 1 second between reads ... */
}
info->get_this.item = item;
-// with 'get', we must NOT honor the usual 'noop' guarantee
-// if (item > VMSTAT_noop)
- info->get_this.result.ul_int = 0;
+ // with 'get', we must NOT honor the usual 'noop' guarantee
+ info->get_this.result.ul_int = 0;
Item_table[item].setsfunc(&info->get_this, &info->hist);
return &info->get_this;
enum vmstat_item *items,
int numitems)
{
+ errno = EINVAL;
if (info == NULL || items == NULL)
return NULL;
if (vmstat_items_check_failed(numitems, items))
return NULL;
+ errno = 0;
/* is this the first time or have things changed since we were last called?
if so, gotta' redo all of our stacks stuff ... */