Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md:
  async_tx: fix asynchronous raid6 recovery for ddf layouts
  async_pq: rename scribble page
  async_pq: kill a stray dma_map() call and other cleanups
  md/raid6: kill a gcc-4.0.1 'uninitialized variable' warning
  raid6/async_tx: handle holes in block list in async_syndrome_val
  md/async: don't pass a memory pointer as a page pointer.
  md: Fix handling of raid5 array which is being reshaped to fewer devices.
  md: fix problems with RAID6 calculations for DDF.
  md/raid456: downlevel multicore operations to raid_run_ops
  md: drivers/md/unroll.pl replaced with awk analog
  md: remove clumsy usage of do_sync_mapping_range from bitmap code
  md: raid1/raid10: handle allocation errors during array setup.
  md/raid5: initialize conf->device_lock earlier
  md/raid1/raid10: add a cond_resched
  Revert "md: do not progress the resync process if the stripe was blocked"
commit bf699c9bac
15 changed files with 291 additions and 231 deletions
crypto/async_tx/async_pq.c

@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-    return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
      * sources and update the coefficients accordingly
      */
     for (i = 0, idx = 0; i < src_cnt; i++) {
-        if (is_raid6_zero_block(blocks[i]))
+        if (blocks[i] == NULL)
             continue;
         dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
                         DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
     srcs = (void **) blocks;
 
     for (i = 0; i < disks; i++) {
-        if (is_raid6_zero_block(blocks[i])) {
+        if (blocks[i] == NULL) {
             BUG_ON(i > disks - 3); /* P or Q can't be zero */
-            srcs[i] = blocks[i];
+            srcs[i] = (void*)raid6_empty_zero_page;
         } else
             srcs[i] = page_address(blocks[i]) + offset;
     }
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
     async_tx_quiesce(&submit->depend_tx);
 
     if (!P(blocks, disks)) {
-        P(blocks, disks) = scribble;
+        P(blocks, disks) = pq_scribble_page;
         BUG_ON(len + offset > PAGE_SIZE);
     }
     if (!Q(blocks, disks)) {
-        Q(blocks, disks) = scribble;
+        Q(blocks, disks) = pq_scribble_page;
         BUG_ON(len + offset > PAGE_SIZE);
     }
     do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                           len);
     struct dma_device *device = chan ? chan->device : NULL;
     struct dma_async_tx_descriptor *tx;
+    unsigned char coefs[disks-2];
     enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
     dma_addr_t *dma_src = NULL;
+    int src_cnt = 0;
 
     BUG_ON(disks < 4);
 
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
              __func__, disks, len);
         if (!P(blocks, disks))
             dma_flags |= DMA_PREP_PQ_DISABLE_P;
+        else
+            pq[0] = dma_map_page(dev, P(blocks, disks),
+                         offset, len,
+                         DMA_TO_DEVICE);
         if (!Q(blocks, disks))
             dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+        else
+            pq[1] = dma_map_page(dev, Q(blocks, disks),
+                         offset, len,
+                         DMA_TO_DEVICE);
+
         if (submit->flags & ASYNC_TX_FENCE)
             dma_flags |= DMA_PREP_FENCE;
-        for (i = 0; i < disks; i++)
+        for (i = 0; i < disks-2; i++)
             if (likely(blocks[i])) {
-                BUG_ON(is_raid6_zero_block(blocks[i]));
-                dma_src[i] = dma_map_page(dev, blocks[i],
-                              offset, len,
-                              DMA_TO_DEVICE);
+                dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+                                offset, len,
+                                DMA_TO_DEVICE);
+                coefs[src_cnt] = raid6_gfexp[i];
+                src_cnt++;
             }
 
         for (;;) {
             tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-                                disks - 2,
-                                raid6_gfexp,
+                                src_cnt,
+                                coefs,
                                 len, pqres,
                                 dma_flags);
             if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-    scribble = alloc_page(GFP_KERNEL);
+    pq_scribble_page = alloc_page(GFP_KERNEL);
 
-    if (scribble)
+    if (pq_scribble_page)
         return 0;
 
     pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-    put_page(scribble);
+    put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
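The comment block above documents the new convention: a NULL entry in the
source slots of blocks[] is a "hole" (zero data), substituted with the zero
page on the synchronous path and simply skipped on the hardware path.  A
minimal caller sketch (not part of the patch; the helper name and the fixed
6-device geometry are invented for illustration):

/*
 * Generate P/Q over a 6-device stripe with one absent data block.
 * Sources occupy blocks[0..3]; the P and Q destinations occupy the
 * last two slots.  Callbacks and the scribble region are left NULL,
 * so blocks[] may be overwritten for address conversion, per the
 * note in the comment above.
 */
#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
gen_pq_with_hole(struct page *d0, struct page *d1, struct page *d3,
                 struct page *p, struct page *q)
{
    struct page *blocks[6];
    struct async_submit_ctl submit;

    blocks[0] = d0;
    blocks[1] = d1;
    blocks[2] = NULL;   /* hole: treated as a block of zeroes */
    blocks[3] = d3;
    blocks[4] = p;      /* P destination */
    blocks[5] = q;      /* Q destination */

    init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
    return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
}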
crypto/async_tx/async_raid6_recov.c

@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-        struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+        struct page **blocks, struct async_submit_ctl *submit)
 {
     struct dma_async_tx_descriptor *tx = NULL;
     struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
     void *cb_param = submit->cb_param;
     void *scribble = submit->scribble;
 
-    p = blocks[4-2];
-    q = blocks[4-1];
+    p = blocks[disks-2];
+    q = blocks[disks-1];
 
     a = blocks[faila];
     b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-        struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+        struct page **blocks, struct async_submit_ctl *submit)
 {
     struct dma_async_tx_descriptor *tx = NULL;
     struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
     dma_async_tx_callback cb_fn = submit->cb_fn;
     void *cb_param = submit->cb_param;
     void *scribble = submit->scribble;
-    int uninitialized_var(good);
-    int i;
+    int good_srcs, good, i;
 
-    for (i = 0; i < 3; i++) {
+    good_srcs = 0;
+    good = -1;
+    for (i = 0; i < disks-2; i++) {
+        if (blocks[i] == NULL)
+            continue;
         if (i == faila || i == failb)
             continue;
-        else {
-            good = i;
-            break;
-        }
+        good = i;
+        good_srcs++;
     }
-    BUG_ON(i >= 3);
+    BUG_ON(good_srcs > 1);
 
-    p = blocks[5-2];
-    q = blocks[5-1];
+    p = blocks[disks-2];
+    q = blocks[disks-1];
     g = blocks[good];
 
     /* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
      * delta p and delta q
      */
     dp = blocks[faila];
-    blocks[faila] = (void *)raid6_empty_zero_page;
+    blocks[faila] = NULL;
    blocks[disks-2] = dp;
     dq = blocks[failb];
-    blocks[failb] = (void *)raid6_empty_zero_page;
+    blocks[failb] = NULL;
     blocks[disks-1] = dq;
 
     init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
             struct page **blocks, struct async_submit_ctl *submit)
 {
+    int non_zero_srcs, i;
+
     BUG_ON(faila == failb);
     if (failb < faila)
         swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
      */
     if (!submit->scribble) {
         void **ptrs = (void **) blocks;
-        int i;
 
         async_tx_quiesce(&submit->depend_tx);
         for (i = 0; i < disks; i++)
-            ptrs[i] = page_address(blocks[i]);
+            if (blocks[i] == NULL)
+                ptrs[i] = (void *) raid6_empty_zero_page;
+            else
+                ptrs[i] = page_address(blocks[i]);
 
         raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
         return NULL;
     }
 
-    switch (disks) {
-    case 4:
+    non_zero_srcs = 0;
+    for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+        if (blocks[i])
+            non_zero_srcs++;
+    switch (non_zero_srcs) {
+    case 0:
+    case 1:
+        /* There must be at least 2 sources - the failed devices. */
+        BUG();
+
+    case 2:
         /* dma devices do not uniformly understand a zero source pq
          * operation (in contrast to the synchronous case), so
-         * explicitly handle the 4 disk special case
+         * explicitly handle the special case of a 4 disk array with
+         * both data disks missing.
          */
-        return __2data_recov_4(bytes, faila, failb, blocks, submit);
-    case 5:
+        return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+    case 3:
         /* dma devices do not uniformly understand a single
         * source pq operation (in contrast to the synchronous
-         * case), so explicitly handle the 5 disk special case
+         * case), so explicitly handle the special case of a 5 disk
+         * array with 2 of 3 data disks missing.
         */
-        return __2data_recov_5(bytes, faila, failb, blocks, submit);
+        return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
    default:
        return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
    }
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
     dma_async_tx_callback cb_fn = submit->cb_fn;
     void *cb_param = submit->cb_param;
     void *scribble = submit->scribble;
+    int good_srcs, good, i;
     struct page *srcs[2];
 
     pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
      */
     if (!scribble) {
         void **ptrs = (void **) blocks;
-        int i;
 
         async_tx_quiesce(&submit->depend_tx);
         for (i = 0; i < disks; i++)
-            ptrs[i] = page_address(blocks[i]);
+            if (blocks[i] == NULL)
+                ptrs[i] = (void*)raid6_empty_zero_page;
+            else
+                ptrs[i] = page_address(blocks[i]);
 
         raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
         return NULL;
     }
 
+    good_srcs = 0;
+    good = -1;
+    for (i = 0; i < disks-2; i++) {
+        if (i == faila)
+            continue;
+        if (blocks[i]) {
+            good = i;
+            good_srcs++;
+            if (good_srcs > 1)
+                break;
+        }
+    }
+    BUG_ON(good_srcs == 0);
+
     p = blocks[disks-2];
     q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
      * Use the dead data page as temporary storage for delta q
      */
     dq = blocks[faila];
-    blocks[faila] = (void *)raid6_empty_zero_page;
+    blocks[faila] = NULL;
     blocks[disks-1] = dq;
 
-    /* in the 4 disk case we only need to perform a single source
-     * multiplication
+    /* in the 4-disk case we only need to perform a single source
+     * multiplication with the one good data block.
      */
-    if (disks == 4) {
-        int good = faila == 0 ? 1 : 0;
+    if (good_srcs == 1) {
         struct page *g = blocks[good];
 
         init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
crypto/async_tx/async_xor.c

@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
     void *cb_param_orig = submit->cb_param;
     enum async_tx_flags flags_orig = submit->flags;
     enum dma_ctrl_flags dma_flags;
-    int xor_src_cnt;
+    int xor_src_cnt = 0;
     dma_addr_t dma_dest;
 
     /* map the dest bidrectional in case it is re-used as a source */
     dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
     for (i = 0; i < src_cnt; i++) {
         /* only map the dest once */
+        if (!src_list[i])
+            continue;
         if (unlikely(src_list[i] == dest)) {
-            dma_src[i] = dma_dest;
+            dma_src[xor_src_cnt++] = dma_dest;
             continue;
         }
-        dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-                      len, DMA_TO_DEVICE);
+        dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+                          len, DMA_TO_DEVICE);
     }
+    src_cnt = xor_src_cnt;
 
     while (src_cnt) {
         submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
         int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
     int i;
-    int xor_src_cnt;
+    int xor_src_cnt = 0;
     int src_off = 0;
     void *dest_buf;
     void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
     /* convert to buffer pointers */
     for (i = 0; i < src_cnt; i++)
-        srcs[i] = page_address(src_list[i]) + offset;
-
+        if (src_list[i])
+            srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+    src_cnt = xor_src_cnt;
     /* set destination address */
     dest_buf = page_address(dest) + offset;
 
drivers/md/Makefile

@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)          += dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                    < $< > $@ || ( rm -f $@ && exit 1 )
 
 ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
 
 targets += raid6int1.c
 $(obj)/raid6int1.c:   UNROLL := 1
-$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int2.c
 $(obj)/raid6int2.c:   UNROLL := 2
-$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int4.c
 $(obj)/raid6int4.c:   UNROLL := 4
-$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int8.c
 $(obj)/raid6int8.c:   UNROLL := 8
-$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int16.c
 $(obj)/raid6int16.c:  UNROLL := 16
-$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int32.c
 $(obj)/raid6int32.c:  UNROLL := 32
-$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec1.o += $(altivec_flags)
 targets += raid6altivec1.c
 $(obj)/raid6altivec1.c:   UNROLL := 1
-$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec2.o += $(altivec_flags)
 targets += raid6altivec2.c
 $(obj)/raid6altivec2.c:   UNROLL := 2
-$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec4.o += $(altivec_flags)
 targets += raid6altivec4.c
 $(obj)/raid6altivec4.c:   UNROLL := 4
-$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec8.o += $(altivec_flags)
 targets += raid6altivec8.c
 $(obj)/raid6altivec8.c:   UNROLL := 8
-$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 quiet_cmd_mktable = TABLE   $@
drivers/md/bitmap.c

@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
     bitmap->offset = mddev->bitmap_offset;
     if (file) {
         get_file(file);
-        do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-                      SYNC_FILE_RANGE_WAIT_BEFORE |
-                      SYNC_FILE_RANGE_WRITE |
-                      SYNC_FILE_RANGE_WAIT_AFTER);
+        /* As future accesses to this file will use bmap,
+         * and bypass the page cache, we must sync the file
+         * first.
+         */
+        vfs_fsync(file, file->f_dentry, 1);
     }
     /* read superblock from bitmap file (this sets bitmap->chunksize) */
     err = bitmap_read_sb(bitmap);
drivers/md/md.c

@@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev)
             rdev->desc_nr = i++;
             rdev->raid_disk = rdev->desc_nr;
             set_bit(In_sync, &rdev->flags);
-        } else if (rdev->raid_disk >= mddev->raid_disks) {
+        } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
             rdev->raid_disk = -1;
             clear_bit(In_sync, &rdev->flags);
         }
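The min() above guards the shrink case: mddev->delta_disks is negative while
an array is being reshaped to fewer devices, so the acceptance cutoff has to
stay at the old, larger disk count until the reshape completes.  A tiny
worked check with hypothetical numbers (shrinking a 4-disk array to 3):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int raid_disks = 3;     /* new geometry */
    int delta_disks = -1;   /* new minus old */

    /* same expression as the analyze_sbs() fix above */
    printf("cutoff = %d\n", raid_disks - min(0, delta_disks));
    /* prints 4: a device with raid_disk == 3 is still accepted */
    return 0;
}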
drivers/md/raid1.c

@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 
     /* allocate a r1bio with room for raid_disks entries in the bios array */
     r1_bio = kzalloc(size, gfp_flags);
-    if (!r1_bio)
+    if (!r1_bio && pi->mddev)
         unplug_slaves(pi->mddev);
 
     return r1_bio;
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev)
                 generic_make_request(bio);
             }
         }
+        cond_resched();
     }
     if (unplug)
         unplug_slaves(mddev);
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev)
     conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
     if (!conf->poolinfo)
         goto out_no_mem;
-    conf->poolinfo->mddev = mddev;
+    conf->poolinfo->mddev = NULL;
     conf->poolinfo->raid_disks = mddev->raid_disks;
     conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
                       r1bio_pool_free,
                       conf->poolinfo);
     if (!conf->r1bio_pool)
         goto out_no_mem;
+    conf->poolinfo->mddev = mddev;
 
     spin_lock_init(&conf->device_lock);
     mddev->queue->queue_lock = &conf->device_lock;
drivers/md/raid10.c

@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 
     /* allocate a r10bio with room for raid_disks entries in the bios array */
     r10_bio = kzalloc(size, gfp_flags);
-    if (!r10_bio)
+    if (!r10_bio && conf->mddev)
         unplug_slaves(conf->mddev);
 
     return r10_bio;
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev)
                 generic_make_request(bio);
             }
         }
+        cond_resched();
     }
     if (unplug)
         unplug_slaves(mddev);
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev)
     if (!conf->tmppage)
         goto out_free_conf;
 
-    conf->mddev = mddev;
     conf->raid_disks = mddev->raid_disks;
     conf->near_copies = nc;
     conf->far_copies = fc;
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev)
         goto out_free_conf;
     }
 
+    conf->mddev = mddev;
     spin_lock_init(&conf->device_lock);
     mddev->queue->queue_lock = &conf->device_lock;
 
drivers/md/raid5.c

@@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks)
 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                  int *count, int syndrome_disks)
 {
-    int slot;
+    int slot = *count;
 
+    if (sh->ddf_layout)
+        (*count)++;
     if (idx == sh->pd_idx)
         return syndrome_disks;
     if (idx == sh->qd_idx)
         return syndrome_disks + 1;
-    slot = (*count)++;
+    if (!sh->ddf_layout)
+        (*count)++;
     return slot;
 }
 
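The ddf_layout branches above are easier to see with concrete numbers.  A
standalone user-space sketch (hypothetical geometry: 6 devices with P on
disk 4 and Q on disk 5; struct stripe is a stand-in for the stripe_head
fields the function reads, and syndrome_disks follows the
"sh->ddf_layout ? disks : disks-2" convention of its callers):

#include <stdio.h>

struct stripe { int pd_idx, qd_idx, ddf_layout; };

/* same logic as the patched raid6_idx_to_slot() */
static int idx_to_slot(int idx, struct stripe *sh, int *count,
                       int syndrome_disks)
{
    int slot = *count;

    if (sh->ddf_layout)
        (*count)++;     /* DDF: P and Q consume positional slots */
    if (idx == sh->pd_idx)
        return syndrome_disks;
    if (idx == sh->qd_idx)
        return syndrome_disks + 1;
    if (!sh->ddf_layout)
        (*count)++;     /* non-DDF: only data disks consume slots */
    return slot;
}

int main(void)
{
    for (int ddf = 0; ddf <= 1; ddf++) {
        struct stripe sh = { .pd_idx = 4, .qd_idx = 5, .ddf_layout = ddf };
        int syndrome_disks = ddf ? 6 : 4;
        int count = 0;

        printf("%s layout:\n", ddf ? "ddf" : "non-ddf");
        for (int idx = 0; idx < 6; idx++)
            printf("  disk %d -> slot %d\n", idx,
                   idx_to_slot(idx, &sh, &count, syndrome_disks));
    }
    return 0;
}

For the non-DDF layout the four data disks land in slots 0..3 and P/Q in
slots 4/5; for the DDF layout the data disks still land in slots 0..3 but P
and Q map to 6/7, leaving slots 4 and 5 as the NULL holes the rest of this
series teaches async_gen_syndrome() and async_syndrome_val() to accept.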
@@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
     int i;
 
     for (i = 0; i < disks; i++)
-        srcs[i] = (void *)raid6_empty_zero_page;
+        srcs[i] = NULL;
 
     count = 0;
     i = d0_idx;
@@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
         srcs[slot] = sh->dev[i].page;
         i = raid6_next_disk(i, disks);
     } while (i != d0_idx);
-    BUG_ON(count != syndrome_disks);
 
-    return count;
+    return syndrome_disks;
 }
 
 static struct dma_async_tx_descriptor *
@@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
      * slot number conversion for 'faila' and 'failb'
      */
     for (i = 0; i < disks ; i++)
-        blocks[i] = (void *)raid6_empty_zero_page;
+        blocks[i] = NULL;
     count = 0;
     i = d0_idx;
     do {
@@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
             failb = slot;
         i = raid6_next_disk(i, disks);
     } while (i != d0_idx);
-    BUG_ON(count != syndrome_disks);
 
     BUG_ON(faila == failb);
     if (failb < faila)
@@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
         init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                   ops_complete_compute, sh,
                   to_addr_conv(sh, percpu));
-        return async_gen_syndrome(blocks, 0, count+2,
+        return async_gen_syndrome(blocks, 0, syndrome_disks+2,
                       STRIPE_SIZE, &submit);
     } else {
         struct page *dest;
@@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
                &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
     int overlap_clear = 0, i, disks = sh->disks;
     struct dma_async_tx_descriptor *tx = NULL;
@@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
     put_cpu();
 }
 
+#ifdef CONFIG_MULTICORE_RAID456
+static void async_run_ops(void *param, async_cookie_t cookie)
+{
+    struct stripe_head *sh = param;
+    unsigned long ops_request = sh->ops.request;
+
+    clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
+    wake_up(&sh->ops.wait_for_ops);
+
+    __raid_run_ops(sh, ops_request);
+    release_stripe(sh);
+}
+
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+{
+    /* since handle_stripe can be called outside of raid5d context
+     * we need to ensure sh->ops.request is de-staged before another
+     * request arrives
+     */
+    wait_event(sh->ops.wait_for_ops,
+           !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
+    sh->ops.request = ops_request;
+
+    atomic_inc(&sh->count);
+    async_schedule(async_run_ops, sh);
+}
+#else
+#define raid_run_ops __raid_run_ops
+#endif
+
 static int grow_one_stripe(raid5_conf_t *conf)
 {
     struct stripe_head *sh;
+    int disks = max(conf->raid_disks, conf->previous_raid_disks);
     sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
     if (!sh)
         return 0;
-    memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
+    memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
     sh->raid_conf = conf;
     spin_lock_init(&sh->lock);
+    #ifdef CONFIG_MULTICORE_RAID456
+    init_waitqueue_head(&sh->ops.wait_for_ops);
+    #endif
 
-    if (grow_buffers(sh, conf->raid_disks)) {
-        shrink_buffers(sh, conf->raid_disks);
+    if (grow_buffers(sh, disks)) {
+        shrink_buffers(sh, disks);
         kmem_cache_free(conf->slab_cache, sh);
         return 0;
     }
     sh->disks = conf->raid_disks;
     /* we just created an active stripe so... */
     atomic_set(&sh->count, 1);
     atomic_inc(&conf->active_stripes);
@@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
     struct kmem_cache *sc;
-    int devs = conf->raid_disks;
+    int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
     sprintf(conf->cache_name[0],
         "raid%d-%s", conf->level, mdname(conf->mddev));
@@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 
         nsh->raid_conf = conf;
         spin_lock_init(&nsh->lock);
+        #ifdef CONFIG_MULTICORE_RAID456
+        init_waitqueue_head(&nsh->ops.wait_for_ops);
+        #endif
 
         list_add(&nsh->lru, &newstripes);
     }
@@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
     case ALGORITHM_PARITY_N:
         break;
     case ALGORITHM_ROTATING_N_CONTINUE:
+        /* Like left_symmetric, but P is before Q */
         if (sh->pd_idx == 0)
             i--;    /* P D D D Q */
-        else if (i > sh->pd_idx)
-            i -= 2; /* D D Q P D */
+        else {
+            /* D D Q P D */
+            if (i < sh->pd_idx)
+                i += raid_disks;
+            i -= (sh->pd_idx + 1);
+        }
         break;
     case ALGORITHM_LEFT_ASYMMETRIC_6:
     case ALGORITHM_RIGHT_ASYMMETRIC_6:
@@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  *
  */
 
-static bool handle_stripe5(struct stripe_head *sh)
+static void handle_stripe5(struct stripe_head *sh)
 {
     raid5_conf_t *conf = sh->raid_conf;
     int disks = sh->disks, i;
@@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh)
     ops_run_io(sh, &s);
 
     return_io(return_bi);
-
-    return blocked_rdev == NULL;
 }
 
-static bool handle_stripe6(struct stripe_head *sh)
+static void handle_stripe6(struct stripe_head *sh)
 {
     raid5_conf_t *conf = sh->raid_conf;
     int disks = sh->disks;
@@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh)
     ops_run_io(sh, &s);
 
     return_io(return_bi);
-
-    return blocked_rdev == NULL;
 }
 
-/* returns true if the stripe was handled */
-static bool handle_stripe(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
 {
     if (sh->raid_conf->level == 6)
-        return handle_stripe6(sh);
+        handle_stripe6(sh);
     else
-        return handle_stripe5(sh);
+        handle_stripe5(sh);
 }
 
 static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev)
 {
     raid5_conf_t *conf = mddev->private;
     int i;
+    int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
     rcu_read_lock();
-    for (i = 0; i < conf->raid_disks; i++) {
+    for (i = 0; i < devs; i++) {
         mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
         if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
             struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -4277,9 +4315,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
     clear_bit(STRIPE_INSYNC, &sh->state);
     spin_unlock(&sh->lock);
 
-    /* wait for any blocked device to be handled */
-    while (unlikely(!handle_stripe(sh)))
-        ;
+    handle_stripe(sh);
     release_stripe(sh);
 
     return STRIPE_SECTORS;
@@ -4349,37 +4385,6 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
     return handled;
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void __process_stripe(void *param, async_cookie_t cookie)
-{
-    struct stripe_head *sh = param;
-
-    handle_stripe(sh);
-    release_stripe(sh);
-}
-
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-    async_schedule_domain(__process_stripe, sh, domain);
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-    async_synchronize_full_domain(domain);
-}
-#else
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-    handle_stripe(sh);
-    release_stripe(sh);
-    cond_resched();
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-}
-#endif
-
 
 /*
  * This is our raid5 kernel thread.
@@ -4393,7 +4398,6 @@ static void raid5d(mddev_t *mddev)
     struct stripe_head *sh;
     raid5_conf_t *conf = mddev->private;
     int handled;
-    LIST_HEAD(raid_domain);
 
     pr_debug("+++ raid5d active\n");
 
@@ -4430,7 +4434,9 @@ static void raid5d(mddev_t *mddev)
         spin_unlock_irq(&conf->device_lock);
 
         handled++;
-        process_stripe(sh, &raid_domain);
+        handle_stripe(sh);
+        release_stripe(sh);
+        cond_resched();
 
         spin_lock_irq(&conf->device_lock);
     }
@@ -4438,7 +4444,6 @@ static void raid5d(mddev_t *mddev)
 
     spin_unlock_irq(&conf->device_lock);
 
-    synchronize_stripe_processing(&raid_domain);
     async_tx_issue_pending_all();
     unplug_slaves(mddev);
 
@@ -4558,13 +4563,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 
     if (!sectors)
         sectors = mddev->dev_sectors;
-    if (!raid_disks) {
+    if (!raid_disks)
         /* size is defined by the smallest of previous and new size */
-        if (conf->raid_disks < conf->previous_raid_disks)
-            raid_disks = conf->raid_disks;
-        else
-            raid_disks = conf->previous_raid_disks;
-    }
+        raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
 
     sectors &= ~((sector_t)mddev->chunk_sectors - 1);
     sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
@@ -4665,7 +4666,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
         }
         per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
     }
-    scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
+    scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
     if (!scribble) {
         err = -ENOMEM;
         break;
@@ -4686,7 +4687,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 static raid5_conf_t *setup_conf(mddev_t *mddev)
 {
     raid5_conf_t *conf;
-    int raid_disk, memory;
+    int raid_disk, memory, max_disks;
     mdk_rdev_t *rdev;
     struct disk_info *disk;
 
@@ -4722,28 +4723,6 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
     conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
     if (conf == NULL)
         goto abort;
-
-    conf->raid_disks = mddev->raid_disks;
-    conf->scribble_len = scribble_len(conf->raid_disks);
-    if (mddev->reshape_position == MaxSector)
-        conf->previous_raid_disks = mddev->raid_disks;
-    else
-        conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
-
-    conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
-                  GFP_KERNEL);
-    if (!conf->disks)
-        goto abort;
-
-    conf->mddev = mddev;
-
-    if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
-        goto abort;
-
-    conf->level = mddev->new_level;
-    if (raid5_alloc_percpu(conf) != 0)
-        goto abort;
 
     spin_lock_init(&conf->device_lock);
     init_waitqueue_head(&conf->wait_for_stripe);
     init_waitqueue_head(&conf->wait_for_overlap);
@@ -4757,11 +4736,33 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
     atomic_set(&conf->active_aligned_reads, 0);
     conf->bypass_threshold = BYPASS_THRESHOLD;
 
+    conf->raid_disks = mddev->raid_disks;
+    if (mddev->reshape_position == MaxSector)
+        conf->previous_raid_disks = mddev->raid_disks;
+    else
+        conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
+    max_disks = max(conf->raid_disks, conf->previous_raid_disks);
+    conf->scribble_len = scribble_len(max_disks);
+
+    conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
+                  GFP_KERNEL);
+    if (!conf->disks)
+        goto abort;
+
+    conf->mddev = mddev;
+
+    if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+        goto abort;
+
+    conf->level = mddev->new_level;
+    if (raid5_alloc_percpu(conf) != 0)
+        goto abort;
+
     pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
     list_for_each_entry(rdev, &mddev->disks, same_set) {
         raid_disk = rdev->raid_disk;
-        if (raid_disk >= conf->raid_disks
+        if (raid_disk >= max_disks
             || raid_disk < 0)
             continue;
         disk = conf->disks + raid_disk;
@@ -4793,7 +4794,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
     }
 
     memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
-         conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
+         max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
     if (grow_stripes(conf, conf->max_nr_stripes)) {
         printk(KERN_ERR
             "raid5: couldn't allocate %dkB for buffers\n", memory);
@@ -4918,7 +4919,8 @@ static int run(mddev_t *mddev)
         test_bit(In_sync, &rdev->flags))
         working_disks++;
 
-    mddev->degraded = conf->raid_disks - working_disks;
+    mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
+               - working_disks);
 
     if (mddev->degraded > conf->max_degraded) {
         printk(KERN_ERR "raid5: not enough operational devices for %s"
drivers/md/raid5.h

@@ -214,12 +214,20 @@ struct stripe_head {
     int             disks;      /* disks in stripe */
     enum check_states   check_state;
     enum reconstruct_states reconstruct_state;
-    /* stripe_operations
+    /**
+     * struct stripe_operations
      * @target - STRIPE_OP_COMPUTE_BLK target
      * @target2 - 2nd compute target in the raid6 case
      * @zero_sum_result - P and Q verification flags
+     * @request - async service request flags for raid_run_ops
      */
     struct stripe_operations {
         int             target, target2;
         enum sum_check_flags zero_sum_result;
+#ifdef CONFIG_MULTICORE_RAID456
+        unsigned long        request;
+        wait_queue_head_t    wait_for_ops;
+#endif
     } ops;
     struct r5dev {
         struct bio  req;
@@ -294,6 +302,8 @@ struct r6_state {
 #define STRIPE_FULL_WRITE   13 /* all blocks are set to be overwritten */
 #define STRIPE_BIOFILL_RUN  14
 #define STRIPE_COMPUTE_RUN  15
+#define STRIPE_OPS_REQ_PENDING  16
 
 /*
  * Operation request flags
 */
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout)
 {
     return (layout >= 0 && layout <= 5)
         ||
-        (layout == 8 || layout == 10)
+        (layout >= 8 && layout <= 10)
         ||
         (layout >= 16 && layout <= 20);
 }
drivers/md/raid6altivec.uc

@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  *
  * <benh> hpa: in process,
  * you can just "steal" the vec unit with enable_kernel_altivec() (but
drivers/md/raid6int.uc

@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  */
 
 #include <linux/raid/pq.h>
drivers/md/raid6test/Makefile

@@ -7,7 +7,7 @@ CC = gcc
 OPTFLAGS = -O2          # Adjust as desired
 CFLAGS   = -I.. -I ../../../include -g $(OPTFLAGS)
 LD       = ld
-PERL     = perl
+AWK      = awk
 AR       = ar
 RANLIB   = ranlib
 
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
 raid6test: test.c raid6.a
 	$(CC) $(CFLAGS) -o raid6test $^
 
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+raid6altivec1.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
 
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+raid6altivec2.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
 
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+raid6altivec4.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
 
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+raid6altivec8.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
 
-raid6int1.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6int.uc > $@
+raid6int1.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
 
-raid6int2.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6int.uc > $@
+raid6int2.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
 
-raid6int4.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6int.uc > $@
+raid6int4.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
 
-raid6int8.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6int.uc > $@
+raid6int8.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
 
-raid6int16.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 16 < raid6int.uc > $@
+raid6int16.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
 
-raid6int32.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 32 < raid6int.uc > $@
+raid6int32.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
 
 raid6tables.c: mktables
 	./mktables > raid6tables.c
drivers/md/unroll.awk (new file, 20 lines)

@@ -0,0 +1,20 @@
+
+# This filter requires one command line option of form -vN=n
+# where n must be a decimal number.
+#
+# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
+# Replace each $# with n, and each $* with a single $.
+
+BEGIN {
+	n = N + 0
+}
+{
+	if (/\$\$/) { rep = n } else { rep = 1 }
+	for (i = 0; i < rep; ++i) {
+		tmp = $0
+		gsub(/\$\$/, i, tmp)
+		gsub(/\$\#/, n, tmp)
+		gsub(/\$\*/, "$", tmp)
+		print tmp
+	}
+}
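To make the substitution rules concrete: with -vN=2, as the Makefiles above
invoke it, a line written in the $$-style of the .uc sources (the identifiers
here are hypothetical) expands like this:

/* input: one line from a .uc file */
wp$$ ^= wd$$;

/* output of: awk -f unroll.awk -vN=2 */
wp0 ^= wd0;
wp1 ^= wd1;

The deleted perl script below implemented exactly the same expansion.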
drivers/md/unroll.pl (deleted file, 24 lines)

@@ -1,24 +0,0 @@
-#!/usr/bin/perl
-#
-# Take a piece of C code and for each line which contains the sequence $$
-# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
-# by the unrolling factor, and $* with a single $
-#
-
-($n) = @ARGV;
-$n += 0;
-
-while ( defined($line = <STDIN>) ) {
-	if ( $line =~ /\$\$/ ) {
-		$rep = $n;
-	} else {
-		$rep = 1;
-	}
-	for ( $i = 0 ; $i < $rep ; $i++ ) {
-		$tmp = $line;
-		$tmp =~ s/\$\$/$i/g;
-		$tmp =~ s/\$\#/$n/g;
-		$tmp =~ s/\$\*/\$/g;
-		print $tmp;
-	}
-}