};
static uint64_t
-stat_histo_max(struct stat_array *nva, unsigned int len) {
+stat_histo_max(struct stat_array *nva, unsigned int len)
+{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
- struct stat_array *nva) {
+ struct stat_array *nva)
+{
nvpair_t *tmp;
int ret;
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
-are_all_pools(int argc, char **argv) {
+are_all_pools(int argc, char **argv)
+{
if ((argc == 0) || !*argv)
return (0);
* seconds.
*/
static void
-fsleep(float sec) {
+fsleep(float sec)
+{
struct timespec req;
req.tv_sec = floor(sec);
req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
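/*
 * A standalone sketch of the split fsleep() performs above, with a
 * nanosleep() call added so it runs on its own (the hunk ends before
 * the function body does); NANOSEC is assumed to be 10^9 as in the
 * SPL headers.
 */
#include <math.h>
#include <time.h>

#define	NANOSEC	1000000000L	/* assumed value */

static void
fsleep_sketch(float sec)
{
	struct timespec req;

	/* Whole seconds go in tv_sec, the fractional remainder in tv_nsec. */
	req.tv_sec = (time_t)floorf(sec);
	req.tv_nsec = (long)((sec - floorf(sec)) * NANOSEC);
	(void) nanosleep(&req, NULL);
}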
/* Find the max element in an array of uint64_t values */
uint64_t
-array64_max(uint64_t array[], unsigned int len) {
+array64_max(uint64_t array[], unsigned int len)
+{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
* floating point numbers.
*/
int
-isnumber(char *str) {
+isnumber(char *str)
+{
for (; *str; str++)
if (!(isdigit(*str) || (*str == '.')))
return (0);
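/*
 * A hypothetical standalone copy of the check above with a small driver.
 * Only the character set is validated, not the shape of the number, so
 * "1.2.3" also passes.
 */
#include <ctype.h>
#include <stdio.h>

static int
isnumber_sketch(const char *str)
{
	for (; *str; str++)
		if (!(isdigit((unsigned char)*str) || (*str == '.')))
			return (0);
	return (1);
}

int
main(void)
{
	/* Prints "1 1 0". */
	printf("%d %d %d\n", isnumber_sketch("3.14"),
	    isnumber_sketch("1.2.3"), isnumber_sketch("4x"));
	return (0);
}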
#define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
#else
static inline void
-zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer) {
+zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer)
+{
struct posix_acl *older = NULL;
spin_lock(&ip->i_lock);
}
static inline void
-zpl_forget_cached_acl(struct inode *ip, int type) {
+zpl_forget_cached_acl(struct inode *ip, int type)
+{
zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
}
#endif /* HAVE_SET_CACHED_ACL_USABLE */
#define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
#else
static inline int
-__posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode) {
+__posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode)
+{
struct posix_acl *oldacl = *acl;
mode_t mode = umode;
int error;
}
static inline int
-__posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep) {
+__posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep)
+{
struct posix_acl *oldacl = *acl;
mode_t mode = *umodep;
int error;
/* END CSTYLED */
#define DEFINE_ACE_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_ace_class, name, \
TP_PROTO(znode_t *zn, zfs_ace_hdr_t *ace, uint32_t mask_matched), \
TP_ARGS(zn, ace, mask_matched))
+/* END CSTYLED */
DEFINE_ACE_EVENT(zfs_zfs__ace__denies);
DEFINE_ACE_EVENT(zfs_zfs__ace__allows);
/* END CSTYLED */
#define DEFINE_ARC_BUF_HDR_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_arc_buf_hdr_class, name, \
TP_PROTO(arc_buf_hdr_t *ab), \
TP_ARGS(ab))
+/* END CSTYLED */
DEFINE_ARC_BUF_HDR_EVENT(zfs_arc__hit);
DEFINE_ARC_BUF_HDR_EVENT(zfs_arc__evict);
DEFINE_ARC_BUF_HDR_EVENT(zfs_arc__delete);
/* END CSTYLED */
#define DEFINE_L2ARC_RW_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_l2arc_rw_class, name, \
TP_PROTO(vdev_t *vd, zio_t *zio), \
TP_ARGS(vd, zio))
+/* END CSTYLED */
DEFINE_L2ARC_RW_EVENT(zfs_l2arc__read);
DEFINE_L2ARC_RW_EVENT(zfs_l2arc__write);
/* END CSTYLED */
#define DEFINE_L2ARC_IODONE_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_l2arc_iodone_class, name, \
TP_PROTO(zio_t *zio, l2arc_write_callback_t *cb), \
TP_ARGS(zio, cb))
+/* END CSTYLED */
DEFINE_L2ARC_IODONE_EVENT(zfs_l2arc__iodone);
/* END CSTYLED */
#define DEFINE_ARC_MISS_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_arc_miss_class, name, \
TP_PROTO(arc_buf_hdr_t *hdr, \
const blkptr_t *bp, uint64_t size, const zbookmark_phys_t *zb), \
TP_ARGS(hdr, bp, size, zb))
+/* END CSTYLED */
DEFINE_ARC_MISS_EVENT(zfs_arc__miss);
/*
/* END CSTYLED */
#define DEFINE_L2ARC_EVICT_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_l2arc_evict_class, name, \
TP_PROTO(l2arc_dev_t *dev, \
list_t *buflist, uint64_t taddr, boolean_t all), \
TP_ARGS(dev, buflist, taddr, all))
+/* END CSTYLED */
DEFINE_L2ARC_EVICT_EVENT(zfs_l2arc__evict);
#endif /* _TRACE_ARC_H */
/* END CSTYLED */
#define DEFINE_DPRINTF_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_dprintf_class, name, \
TP_PROTO(const char *file, const char *function, int line, \
const char *msg), \
TP_ARGS(file, function, line, msg))
+/* END CSTYLED */
DEFINE_DPRINTF_EVENT(zfs_zfs__dprintf);
/*
#ifdef TP_CONDITION
#define DEFINE_SET_ERROR_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT_CONDITION(zfs_set_error_class, name, \
TP_PROTO(const char *file, const char *function, int line, \
uintptr_t error), \
TP_ARGS(file, function, line, error), \
TP_CONDITION(error))
+/* END CSTYLED */
#else
#define DEFINE_SET_ERROR_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_set_error_class, name, \
TP_PROTO(const char *file, const char *function, int line, \
uintptr_t error), \
TP_ARGS(file, function, line, error))
+/* END CSTYLED */
#endif
DEFINE_SET_ERROR_EVENT(zfs_set__error);
__entry->db_blkid, __entry->db_offset, \
__entry->db_size, __entry->db_state, __entry->db_holds
+/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_dbuf_class,
TP_PROTO(dmu_buf_impl_t *db, zio_t *zio),
TP_ARGS(db, zio),
TP_fast_assign(DBUF_TP_FAST_ASSIGN),
TP_printk(DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS)
);
+/* END CSTYLED */
#define DEFINE_DBUF_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_dbuf_class, name, \
TP_PROTO(dmu_buf_impl_t *db, zio_t *zio), \
TP_ARGS(db, zio))
+/* END CSTYLED */
DEFINE_DBUF_EVENT(zfs_blocked__read);
+/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_dbuf_evict_one_class,
TP_PROTO(dmu_buf_impl_t *db, multilist_sublist_t *mls),
TP_ARGS(db, mls),
TP_fast_assign(DBUF_TP_FAST_ASSIGN),
TP_printk(DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS)
);
+/* END CSTYLED */
#define DEFINE_DBUF_EVICT_ONE_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_dbuf_evict_one_class, name, \
TP_PROTO(dmu_buf_impl_t *db, multilist_sublist_t *mls), \
TP_ARGS(db, mls))
+/* END CSTYLED */
DEFINE_DBUF_EVICT_ONE_EVENT(zfs_dbuf__evict__one);
#endif /* _TRACE_DBUF_H */
/* END CSTYLED */
#define DEFINE_DELAY_MINTIME_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_delay_mintime_class, name, \
TP_PROTO(dmu_tx_t *tx, uint64_t dirty, uint64_t min_tx_time), \
TP_ARGS(tx, dirty, min_tx_time))
+/* END CSTYLED */
DEFINE_DELAY_MINTIME_EVENT(zfs_delay__mintime);
#endif /* _TRACE_DMU_H */
/* END CSTYLED */
#define DEFINE_DNODE_MOVE_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_dnode_move_class, name, \
TP_PROTO(dnode_t *dn, int64_t refcount, uint32_t dbufs), \
TP_ARGS(dn, refcount, dbufs))
+/* END CSTYLED */
DEFINE_DNODE_MOVE_EVENT(zfs_dnode__move);
#endif /* _TRACE_DNODE_H */
/* END CSTYLED */
#define DEFINE_MULTILIST_INSERT_REMOVE_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_multilist_insert_remove_class, name, \
TP_PROTO(multilist_t *ml, unsigned int sublist_idx, void *obj), \
TP_ARGS(ml, sublist_idx, obj))
+/* END CSTYLED */
DEFINE_MULTILIST_INSERT_REMOVE_EVENT(zfs_multilist__insert);
DEFINE_MULTILIST_INSERT_REMOVE_EVENT(zfs_multilist__remove);
/* END CSTYLED */
#define DEFINE_TXG_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_txg_class, name, \
TP_PROTO(dsl_pool_t *dp, uint64_t txg), \
TP_ARGS(dp, txg))
+/* END CSTYLED */
DEFINE_TXG_EVENT(zfs_dsl_pool_sync__done);
DEFINE_TXG_EVENT(zfs_txg__quiescing);
DEFINE_TXG_EVENT(zfs_txg__opened);
/* END CSTYLED */
#define DEFINE_ZIL_EVENT(name) \
+/* BEGIN CSTYLED */
DEFINE_EVENT(zfs_zil_class, name, \
TP_PROTO(zilog_t *zilog), \
TP_ARGS(zilog))
+/* END CSTYLED */
DEFINE_ZIL_EVENT(zfs_zil__cw1);
DEFINE_ZIL_EVENT(zfs_zil__cw2);
#endif /* _TRACE_ZIL_H */
*/
-static const struct nvlist_printops defprtops = {
+static const struct nvlist_printops defprtops =
+{
{ nvprint_boolean, NULL },
{ nvprint_boolean_value, NULL },
{ nvprint_byte, NULL },
}
static void
-parse_sharetab(sa_handle_impl_t impl_handle) {
+parse_sharetab(sa_handle_impl_t impl_handle)
+{
FILE *fp;
char line[512];
char *eol, *pathname, *resource, *fstype, *options, *description;
}
static void
-free_share(sa_share_impl_t impl_share) {
+free_share(sa_share_impl_t impl_share)
+{
sa_fstype_t *fstype;
fstype = fstypes;
#ifdef _BIG_ENDIAN
static __inline__ uint64_t
-htonll(uint64_t n) {
+htonll(uint64_t n)
+{
return (n);
}
static __inline__ uint64_t
-ntohll(uint64_t n) {
+ntohll(uint64_t n)
+{
return (n);
}
#else
static __inline__ uint64_t
-htonll(uint64_t n) {
+htonll(uint64_t n)
+{
return ((((uint64_t)htonl(n)) << 32) + htonl(n >> 32));
}
static __inline__ uint64_t
-ntohll(uint64_t n) {
+ntohll(uint64_t n)
+{
return ((((uint64_t)ntohl(n)) << 32) + ntohl(n >> 32));
}
#endif
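/*
 * A minimal sketch of the little-endian construction above: htonl()
 * byte-swaps each 32-bit half and the halves are then exchanged,
 * which together amounts to a full 64-bit byte swap.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
htonll_sketch(uint64_t n)
{
	/* Low half swapped into the high word, high half into the low. */
	return ((((uint64_t)htonl(n)) << 32) + htonl(n >> 32));
}

int
main(void)
{
	/* On a little-endian host: 0102030405060708 -> 0807060504030201. */
	printf("%016llx\n",
	    (unsigned long long)htonll_sketch(0x0102030405060708ULL));
	return (0);
}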
}
static inline void
-umem_nofail_callback(umem_nofail_callback_t *cb) {}
+umem_nofail_callback(umem_nofail_callback_t *cb)
+{}
static inline umem_cache_t *
umem_cache_create(
/*ARGSUSED*/
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
- int minalloc, int maxalloc, uint_t flags)
+ int minalloc, int maxalloc, uint_t flags)
{
taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
int t;
*/
static void
rijndael_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4],
- uint32_t ct[4], int flags) {
+ uint32_t ct[4], int flags)
+{
if (flags & INTEL_AES_NI_CAPABLE) {
KPREEMPT_DISABLE;
aes_encrypt_intel(rk, Nr, pt, ct);
*/
static void
rijndael_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4],
- uint32_t pt[4], int flags) {
+ uint32_t pt[4], int flags)
+{
if (flags & INTEL_AES_NI_CAPABLE) {
KPREEMPT_DISABLE;
aes_decrypt_intel(rk, Nr, ct, pt);
int
ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
-void (*copy_block)(uint8_t *, uint8_t *))
+ void (*copy_block)(uint8_t *, uint8_t *))
{
uint64_t upper_mask = 0;
uint64_t lower_mask = 0;
/* Skein_256 */
#if !(SKEIN_USE_ASM & 256)
-
void
Skein_256_Process_Block(Skein_256_Ctxt_t *ctx, const uint8_t *blkPtr,
size_t blkCnt, size_t byteCntAdd)
-{ /* do it in C */
+{
enum {
WCNT = SKEIN_256_STATE_WORDS
};
/* run the rounds */
#define Round256(p0, p1, p2, p3, ROT, rNum) \
- X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
+ X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
+ X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
#if SKEIN_UNROLL_256 == 0
#define R256(p0, p1, p2, p3, ROT, rNum) /* fully unrolled */ \
- Round256(p0, p1, p2, p3, ROT, rNum) \
- Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rNum, Xptr);
+ Round256(p0, p1, p2, p3, ROT, rNum) \
+ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rNum, Xptr);
#define I256(R) \
- X0 += ks[((R) + 1) % 5]; /* inject the key schedule value */ \
- X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
- X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
- X3 += ks[((R) + 4) % 5] + (R) + 1; \
- Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);
+ X0 += ks[((R) + 1) % 5]; /* inject the key schedule value */ \
+ X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
+ X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
+ X3 += ks[((R) + 4) % 5] + (R) + 1; \
+ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);
#else /* looping version */
#define R256(p0, p1, p2, p3, ROT, rNum) \
- Round256(p0, p1, p2, p3, ROT, rNum) \
- Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rNum, Xptr);
+ Round256(p0, p1, p2, p3, ROT, rNum) \
+ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rNum, Xptr);
#define I256(R) \
X0 += ks[r + (R) + 0]; /* inject the key schedule value */ \
X2 += ks[r + (R) + 2] + ts[r + (R) + 1]; \
X3 += ks[r + (R) + 3] + r + (R); \
ks[r + (R) + 4] = ks[r + (R) - 1]; /* rotate key schedule */ \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr);
/* loop thru it */
for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256)
Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X);
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
- }
- while (--blkCnt);
+ } while (--blkCnt);
ctx->h.T[0] = ts[0];
ctx->h.T[1] = ts[1];
}
void
Skein_512_Process_Block(Skein_512_Ctxt_t *ctx, const uint8_t *blkPtr,
size_t blkCnt, size_t byteCntAdd)
-{ /* do it in C */
+{
enum {
WCNT = SKEIN_512_STATE_WORDS
};
Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X);
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
- }
- while (--blkCnt);
+ } while (--blkCnt);
ctx->h.T[0] = ts[0];
ctx->h.T[1] = ts[1];
}
*/
int
rijndael_key_setup_enc_amd64(uint32_t rk[], const uint32_t cipherKey[],
- int keyBits)
+ int keyBits)
{
switch (keyBits) {
case 128:
*/
int
rijndael_key_setup_dec_amd64(uint32_t rk[], const uint32_t cipherKey[],
- int keyBits)
+ int keyBits)
{
switch (keyBits) {
case 128:
static int
aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
- crypto_req_handle_t req) {
+ crypto_req_handle_t req)
+{
return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE));
}
static int
aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
- crypto_req_handle_t req) {
+ crypto_req_handle_t req)
+{
return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE));
}
}
int
-edonr_mod_fini(void) {
+edonr_mod_fini(void)
+{
return (mod_remove(&modlinkage));
}
}
int
-skein_mod_fini(void) {
+skein_mod_fini(void)
+{
int ret;
if (skein_prov_handle != 0) {
*/
static int
do_case_compare(size_t uv, uchar_t *s1, uchar_t *s2, size_t n1,
- size_t n2, boolean_t is_it_toupper, int *errnum)
+ size_t n2, boolean_t is_it_toupper, int *errnum)
{
int f;
int sz1;
*/
static size_t
do_decomp(size_t uv, uchar_t *u8s, uchar_t *s, int sz,
- boolean_t canonical_decomposition, u8_normalization_states_t *state)
+ boolean_t canonical_decomposition, u8_normalization_states_t *state)
{
uint16_t b1 = 0;
uint16_t b2 = 0;
*/
static size_t
do_composition(size_t uv, uchar_t *s, uchar_t *comb_class, uchar_t *start,
- uchar_t *disp, size_t last, uchar_t **os, uchar_t *oslast)
+ uchar_t *disp, size_t last, uchar_t **os, uchar_t *oslast)
{
uchar_t t[U8_STREAM_SAFE_TEXT_MAX + 1];
uchar_t tc[U8_MB_CUR_MAX] = { '\0' };
*/
static size_t
collect_a_seq(size_t uv, uchar_t *u8s, uchar_t **source, uchar_t *slast,
- boolean_t is_it_toupper,
- boolean_t is_it_tolower,
- boolean_t canonical_decomposition,
- boolean_t compatibility_decomposition,
- boolean_t canonical_composition,
- int *errnum, u8_normalization_states_t *state)
+ boolean_t is_it_toupper, boolean_t is_it_tolower,
+ boolean_t canonical_decomposition, boolean_t compatibility_decomposition,
+ boolean_t canonical_composition,
+ int *errnum, u8_normalization_states_t *state)
{
uchar_t *s;
int sz;
*/
static int
do_norm_compare(size_t uv, uchar_t *s1, uchar_t *s2, size_t n1, size_t n2,
- int flag, int *errnum)
+ int flag, int *errnum)
{
int result;
size_t sz1;
*/
int
u8_strcmp(const char *s1, const char *s2, size_t n, int flag, size_t uv,
- int *errnum)
+ int *errnum)
{
int f;
size_t n1;
size_t
u8_textprep_str(char *inarray, size_t *inlen, char *outarray, size_t *outlen,
- int flag, size_t unicode_version, int *errnum)
+ int flag, size_t unicode_version, int *errnum)
{
int f;
int sz;
#if defined(_KERNEL) && defined(HAVE_SPL)
static int __init
-unicode_init(void) {
+unicode_init(void)
+{
return (0);
}
#include <strings.h>
static void
-fletcher_4_sse2_init(fletcher_4_ctx_t *ctx) {
+fletcher_4_sse2_init(fletcher_4_ctx_t *ctx)
+{
bzero(ctx->sse, 4 * sizeof (zfs_fletcher_sse_t));
}
static void
-fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp) {
+fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
+{
uint64_t A, B, C, D;
/*
};
static void
-sg_init_table(struct scatterlist *sg, int nr) {
+sg_init_table(struct scatterlist *sg, int nr)
+{
memset(sg, 0, nr * sizeof (struct scatterlist));
sg[nr - 1].end = 1;
}
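/*
 * A self-contained sketch of the compat shim above, using a hypothetical
 * minimal scatterlist stand-in (the real struct is defined earlier in the
 * file and elided from this hunk); only the field the shim touches is
 * modeled.
 */
#include <string.h>

struct sg_sketch {
	void		*page;
	unsigned int	length;
	int		end;
};

static void
sg_init_table_sketch(struct sg_sketch *sg, int nr)
{
	/* Zero the whole table, then flag the final entry as the end. */
	memset(sg, 0, nr * sizeof (struct sg_sketch));
	sg[nr - 1].end = 1;
}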
*/
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
- ssize_t csize, ssize_t dsize, const unsigned parity,
- void (*func_raidz_gen)(void **, const void *, size_t, size_t))
+ ssize_t csize, ssize_t dsize, const unsigned parity,
+ void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
int i;
ssize_t len, dlen;
*/
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
- ssize_t tsize, const unsigned parity,
- void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
- const unsigned *mul),
- const unsigned *mul)
+ ssize_t tsize, const unsigned parity,
+ void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
+ const unsigned *mul),
+ const unsigned *mul)
{
int i;
ssize_t len;
*/
unsigned int
abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
- unsigned int io_size, size_t off)
+ unsigned int io_size, size_t off)
{
int i;
struct abd_iter aiter;
* increase this negative difference.
*/
static uint64_t
-arc_evictable_memory(void) {
+arc_evictable_memory(void)
+{
uint64_t arc_clean =
refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
static void
__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
dnode_t *dn, uint8_t level, uint64_t blkid,
- boolean_t fail_sparse, boolean_t fail_uncached,
- void *tag, dmu_buf_impl_t **dbp, int depth)
+ boolean_t fail_sparse, boolean_t fail_uncached,
+ void *tag, dmu_buf_impl_t **dbp, int depth)
{
dh->dh_dn = dn;
dh->dh_level = level;
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
- dmu_tx_t *tx)
+ dmu_tx_t *tx)
{
dnode_t *dn;
int err;
void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
- dmu_tx_t *tx)
+ dmu_tx_t *tx)
{
dnode_t *dn;
void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
- dmu_tx_t *tx)
+ dmu_tx_t *tx)
{
dnode_t *dn;
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
- void *data)
+ void *data)
{
dmu_object_info_t doi;
dmu_tx_t *tx;
noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
- arc_buf_t *abuf)
+ arc_buf_t *abuf)
{
dmu_tx_t *tx;
dmu_buf_t *bonus;
*/
int
secpolicy_vnode_access2(const cred_t *cr, struct inode *ip, uid_t owner,
- mode_t curmode, mode_t wantmode)
+ mode_t curmode, mode_t wantmode)
{
return (0);
}
* See the comment above spa_slop_shift for details.
*/
uint64_t
-spa_get_slop_space(spa_t *spa) {
+spa_get_slop_space(spa_t *spa)
+{
uint64_t space = spa_get_dspace(spa);
return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
}
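/*
 * A worked example of the slop calculation above. Both constants here
 * are assumptions based on the usual defaults: spa_slop_shift of 5
 * (reserve 1/32 of dspace) and SPA_MINDEVSIZE of 64 MB.
 */
#include <stdint.h>
#include <stdio.h>

#define	SPA_MINDEVSIZE	(64ULL << 20)		/* assumed: 64 MB */
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

static int spa_slop_shift = 5;			/* assumed default */

static uint64_t
slop_space(uint64_t dspace)
{
	return (MAX(dspace >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
}

int
main(void)
{
	/* A 1 TB pool reserves 32 GB; a 1 GB pool hits the 32 MB floor. */
	printf("%llu\n", (unsigned long long)slop_space(1ULL << 40));
	printf("%llu\n", (unsigned long long)slop_space(1ULL << 30));
	return (0);
}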
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
-
+/* BEGIN CSTYLED */
module_param(metaslabs_per_vdev, int, 0644);
MODULE_PARM_DESC(metaslabs_per_vdev,
"Divide added vdev into approximately (but no more than) this number "
"of metaslabs");
+/* END CSTYLED */
#endif
};
#if defined(_KERNEL) && defined(HAVE_SPL)
+/* BEGIN CSTYLED */
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
"Rotating media load increment for non-seeking I/O's");
"Rotating media load increment for seeking I/O's");
module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
"Offset in bytes from the last I/O which "
"triggers a reduced rotating media seek increment");
module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
"Non-rotating media load increment for seeking I/O's");
-
+/* END CSTYLED */
#endif
static raidz_rec_f
reconstruct_fun_p_sel(raidz_map_t *rm, const int *parity_valid,
- const int nbaddata)
+ const int nbaddata)
{
if (nbaddata == 1 && parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
static raidz_rec_f
reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
- const int nbaddata)
+ const int nbaddata)
{
if (nbaddata == 1) {
if (parity_valid[CODE_P]) {
static raidz_rec_f
reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
- const int nbaddata)
+ const int nbaddata)
{
if (nbaddata == 1) {
if (parity_valid[CODE_P]) {
*/
int
vdev_raidz_math_reconstruct(raidz_map_t *rm, const int *parity_valid,
- const int *dt, const int nbaddata)
+ const int *dt, const int nbaddata)
{
raidz_rec_f rec_fn = NULL;
}
module_param_call(zfs_vdev_raidz_impl, zfs_vdev_raidz_impl_set,
- zfs_vdev_raidz_impl_get, NULL, 0644);
+ zfs_vdev_raidz_impl_get, NULL, 0644);
MODULE_PARM_DESC(zfs_vdev_raidz_impl, "Select raidz implementation.");
#endif
#if defined(__aarch64__)
-
+/* BEGIN CSTYLED */
const uint8_t
__attribute__((aligned(256))) gf_clmul_mod_lt[4*256][16] = {
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
{ 0x00, 0xff, 0xfe, 0x01, 0xfc, 0x03, 0x02, 0xfd,
0xf8, 0x07, 0x06, 0xf9, 0x04, 0xfb, 0xfa, 0x05 }
};
-
+/* END CSTYLED */
#endif /* defined(__aarch64__) */
*/
static void
raidz_gen_pq_add(void **c, const void *dc, const size_t csize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *p = (v_t *)c[0];
v_t *q = (v_t *)c[1];
*/
static void
raidz_gen_pqr_add(void **c, const void *dc, const size_t csize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *p = (v_t *)c[0];
v_t *q = (v_t *)c[1];
*/
static void
raidz_syn_q_abd(void **xc, const void *dc, const size_t xsize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *x = (v_t *)xc[TARGET_X];
const v_t *d = (v_t *)dc;
*/
static void
raidz_syn_r_abd(void **xc, const void *dc, const size_t tsize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *x = (v_t *)xc[TARGET_X];
const v_t *d = (v_t *)dc;
*/
static void
raidz_syn_pq_abd(void **tc, const void *dc, const size_t tsize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *x = (v_t *)tc[TARGET_X];
v_t *y = (v_t *)tc[TARGET_Y];
*/
static void
raidz_rec_pq_abd(void **tc, const size_t tsize, void **c,
- const unsigned *mul)
+ const unsigned *mul)
{
v_t *x = (v_t *)tc[TARGET_X];
v_t *y = (v_t *)tc[TARGET_Y];
*/
static void
raidz_syn_pr_abd(void **c, const void *dc, const size_t tsize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
*/
static void
raidz_rec_pr_abd(void **t, const size_t tsize, void **c,
- const unsigned *mul)
+ const unsigned *mul)
{
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
*/
static void
raidz_syn_qr_abd(void **c, const void *dc, const size_t tsize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
*/
static void
raidz_rec_qr_abd(void **t, const size_t tsize, void **c,
- const unsigned *mul)
+ const unsigned *mul)
{
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
*/
static void
raidz_syn_pqr_abd(void **c, const void *dc, const size_t tsize,
- const size_t dsize)
+ const size_t dsize)
{
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
*/
static void
raidz_rec_pqr_abd(void **t, const size_t tsize, void **c,
- const unsigned * const mul)
+ const unsigned * const mul)
{
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
#if defined(__x86_64)
#if defined(HAVE_SSSE3) || defined(HAVE_AVX2) || defined(HAVE_AVX512BW)
-
+/* BEGIN CSTYLED */
const uint8_t
__attribute__((aligned(256))) gf_clmul_mod_lt[4*256][16] = {
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
{ 0x00, 0xff, 0xfe, 0x01, 0xfc, 0x03, 0x02, 0xfd,
0xf8, 0x07, 0x06, 0xf9, 0x04, 0xfb, 0xfa, 0x05 }
};
-
+/* END CSTYLED */
#endif /* defined(HAVE_SSSE3) || defined(HAVE_AVX2) || defined(HAVE_AVX512BW) */
#endif /* defined(__x86_64) */
*/
int
zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap,
- struct inode **ipp, cred_t *cr, int flags)
+ struct inode **ipp, cred_t *cr, int flags)
{
zfs_sb_t *zsb = ITOZSB(dip);
char *dsname;
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
- abd_t *data, uint64_t size, int type, zio_priority_t priority,
- enum zio_flag flags, zio_done_func_t *done, void *private)
+ abd_t *data, uint64_t size, int type, zio_priority_t priority,
+ enum zio_flag flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*ARGSUSED*/
static void
abd_checksum_off(abd_t *abd, uint64_t size,
- const void *ctx_template, zio_cksum_t *zcp)
+ const void *ctx_template, zio_cksum_t *zcp)
{
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
*/
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+ struct list_head *pages, unsigned nr_pages)
{
return (read_cache_pages(mapping, pages,
(filler_t *)zpl_readpage, filp));
} zvol_snapdev_cb_arg_t;
static int
-zvol_set_snapdev_cb(const char *dsname, void *param) {
+zvol_set_snapdev_cb(const char *dsname, void *param)
+{
zvol_snapdev_cb_arg_t *arg = param;
if (strchr(dsname, '@') == NULL)
static int
zpios_get_work_item(run_args_t *run_args, dmu_obj_t *obj, __u64 *offset,
- __u32 *chunk_size, zpios_region_t **region, __u32 flags)
+ __u32 *chunk_size, zpios_region_t **region, __u32 flags)
{
int i, j, count = 0;
unsigned int random_int;
static int
zpios_dmu_write(run_args_t *run_args, objset_t *os, uint64_t object,
- uint64_t offset, uint64_t size, const void *buf)
+ uint64_t offset, uint64_t size, const void *buf)
{
struct dmu_tx *tx;
int rc, how = TXG_WAIT;
static char xattrbytes[XATTR_SIZE_MAX];
static int
-usage(int argc, char **argv) {
+usage(int argc, char **argv)
+{
fprintf(stderr,
- "usage: %s [-hvycdrRk] [-n <nth>] [-f <files>] [-x <xattrs>]\n"
- " [-s <bytes>] [-p <path>] [-t <script> ] [-o <phase>]\n",
- argv[0]);
+ "usage: %s [-hvycdrRk] [-n <nth>] [-f <files>] [-x <xattrs>]\n"
+ " [-s <bytes>] [-p <path>] [-t <script> ] [-o <phase>]\n",
+ argv[0]);
fprintf(stderr,
- " --help -h This help\n"
- " --verbose -v Increase verbosity\n"
- " --verify -y Verify xattr contents\n"
- " --nth -n <nth> Print every nth file\n"
- " --files -f <files> Set xattrs on N files\n"
- " --xattrs -x <xattrs> Set N xattrs on each file\n"
- " --size -s <bytes> Set N bytes per xattr\n"
- " --path -p <path> Path to files\n"
- " --synccaches -c Sync caches between phases\n"
- " --dropcaches -d Drop caches between phases\n"
- " --script -t <script> Exec script between phases\n"
- " --seed -e <seed> Random seed value\n"
- " --random -r Randomly sized xattrs [16-size]\n"
- " --randomvalue -R Random xattr values\n"
- " --keep -k Don't unlink files\n"
- " --only -o <num> Only run phase N\n"
- " 0=all, 1=create, 2=setxattr,\n"
- " 3=getxattr, 4=unlink\n\n");
+ " --help -h This help\n"
+ " --verbose -v Increase verbosity\n"
+ " --verify -y Verify xattr contents\n"
+ " --nth -n <nth> Print every nth file\n"
+ " --files -f <files> Set xattrs on N files\n"
+ " --xattrs -x <xattrs> Set N xattrs on each file\n"
+ " --size -s <bytes> Set N bytes per xattr\n"
+ " --path -p <path> Path to files\n"
+ " --synccaches -c Sync caches between phases\n"
+ " --dropcaches -d Drop caches between phases\n"
+ " --script -t <script> Exec script between phases\n"
+ " --seed -e <seed> Random seed value\n"
+ " --random -r Randomly sized xattrs [16-size]\n"
+ " --randomvalue -R Random xattr values\n"
+ " --keep -k Don't unlink files\n"
+ " --only -o <num> Only run phase N\n"
+ " 0=all, 1=create, 2=setxattr,\n"
+ " 3=getxattr, 4=unlink\n\n");
return (1);
}