First of all, the readahead value is maintained in sectors. By default the sector size is 512B.

Separate disk

Default readahead:

# blockdev --getra /dev/sda1
256  

So in KB:

readahead = 256 * 512B / 1024 = 128KB  

RAID0

Software md RAID0 readahead formula:

readahead = chunk size * number of disks in stripe * 2  

Example for RAID0 of 4 disks with default 512KB chunk size:

# cat /proc/mdstat | grep md3
md3 : active raid0 sda3[1] sdb3[0] sdc3[1] sdd3[0]  
      816082432 blocks super 1.2 512k chunks
readahead = 512KB * 4 * 2 = 4096 KB = 8192 sectors  
# blockdev --getra /dev/md3
8192  

RAID10

readahead = chunk size * stripes * 2 * X

where X = 0.5 for the near-copies RAID layout;  
      X = 1 for other layouts.
# cat /proc/mdstat | grep md2
md2 : active raid10 sdc2[3] sda2[1] sdb2[2] sdd2[0]  
      39060480 blocks 512K chunks 2 near-copies [4/4] [UUUU]

Example for RAID10 of 4 disks with default 512KB chunk size and near-copies layout:

readahead = 512KB * 4 * 2 * 0.5 = 2048 KB = 4096 sectors  
# blockdev --getra /dev/md2
4096  

Linux kernel code

Disk readahead

./mm/backing-dev.c

struct backing_dev_info default_backing_dev_info = {  
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
};

where VM_MAX_READAHEAD is:

./include/linux/mm.h:

#define VM_MAX_READAHEAD   128     /* kbytes */

RAID0

./drivers/md/raid0.c

        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe. i.e. number of devices
         * multiplied by chunk size times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
        {
                int stripe = mddev->raid_disks *
                        (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
        }

RAID10

./drivers/md/raid10.c

        if (mddev->queue) {
                int stripe = conf->geo.raid_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
                mddev->queue->backing_dev_info.congested_fn = raid10_congested;
                mddev->queue->backing_dev_info.congested_data = mddev;

                /* Calculate max read-ahead size.
                 * We need to readahead at least twice a whole stripe....
                 * maybe...
                 */
                stripe /= conf->geo.near_copies;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
                blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
        }

RAID layouts

# man 4 md

When configuring a RAID10 array, it is necessary to specify the number of replicas of each data block that are required (this will normally be 2) and whether the replicas should be 'near', 'offset' or 'far'. (Note that the 'offset' layout is only available from 2.6.18).
When 'near' replicas are chosen, the multiple copies of a given chunk are laid out consecutively across the stripes of the array, so the two copies of a datablock will likely be at the same offset on two adjacent devices.

When 'far' replicas are chosen, the multiple copies of a given chunk are laid out quite distant from each other. The first copy of all data blocks will be striped across the early part of all drives in RAID0 fashion, and then the next copy of all blocks will be striped across a later section of all drives, always ensuring that all copies of any given block are on different drives.

The 'far' arrangement can give sequential read performance equal to that of a RAID0 array, but at the cost of reduced write performance.