@@ -26,7 +26,7 @@ struct list_head mtd_partitions;
 struct mtd_part {
 	struct mtd_info mtd;
 	struct mtd_info *master;
-	u_int32_t offset;
+	uint64_t offset;
 	int index;
 	struct list_head list;
 	int registered;
@@ -44,50 +44,32 @@ struct mtd_part {
  * to the _real_ device.
  */
-static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, u_char *buf)
+static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
+		size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
+	struct mtd_ecc_stats stats;
 	int res;
 
+	stats = part->master->ecc_stats;
+
 	if (from >= mtd->size)
 		len = 0;
 	else if (from + len > mtd->size)
 		len = mtd->size - from;
-	res = part->master->read (part->master, from + part->offset,
+	res = part->master->read(part->master, from + part->offset,
 				   len, retlen, buf);
 	if (unlikely(res)) {
 		if (res == -EUCLEAN)
-			mtd->ecc_stats.corrected++;
+			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
 		if (res == -EBADMSG)
-			mtd->ecc_stats.failed++;
+			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
 	}
 	return res;
 }
 
-#ifdef MTD_LINUX
-static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, void **virt, resource_size_t *phys)
-{
-	struct mtd_part *part = PART(mtd);
-	if (from >= mtd->size)
-		len = 0;
-	else if (from + len > mtd->size)
-		len = mtd->size - from;
-	return part->master->point(part->master, from + part->offset,
-				   len, retlen, virt, phys);
-}
-
-static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
-	struct mtd_part *part = PART(mtd);
-	part->master->unpoint(part->master, from + part->offset, len);
-}
-#endif
-
 static int part_read_oob(struct mtd_info *mtd, loff_t from,
-			 struct mtd_oob_ops *ops)
+		struct mtd_oob_ops *ops)
 {
 	struct mtd_part *part = PART(mtd);
 	int res;
@@ -107,38 +89,38 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
 	return res;
 }
 
-static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, u_char *buf)
+static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return part->master->read_user_prot_reg (part->master, from,
+	return part->master->read_user_prot_reg(part->master, from,
 					len, retlen, buf);
 }
 
-static int part_get_user_prot_info (struct mtd_info *mtd,
-				    struct otp_info *buf, size_t len)
+static int part_get_user_prot_info(struct mtd_info *mtd,
+		struct otp_info *buf, size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return part->master->get_user_prot_info (part->master, buf, len);
+	return part->master->get_user_prot_info(part->master, buf, len);
 }
 
-static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, u_char *buf)
+static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return part->master->read_fact_prot_reg (part->master, from,
+	return part->master->read_fact_prot_reg(part->master, from,
 					len, retlen, buf);
 }
 
-static int part_get_fact_prot_info (struct mtd_info *mtd,
-				    struct otp_info *buf, size_t len)
+static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return part->master->get_fact_prot_info (part->master, buf, len);
+	return part->master->get_fact_prot_info(part->master, buf, len);
 }
 
-static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
-			size_t *retlen, const u_char *buf)
+static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
+		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
 	if (!(mtd->flags & MTD_WRITEABLE))
@@ -147,13 +129,12 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
 		len = 0;
 	else if (to + len > mtd->size)
 		len = mtd->size - to;
-	return part->master->write (part->master, to + part->offset,
+	return part->master->write(part->master, to + part->offset,
 					len, retlen, buf);
 }
 
-#ifdef MTD_LINUX
-static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
-			size_t *retlen, const u_char *buf)
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
 	if (!(mtd->flags & MTD_WRITEABLE))
@@ -162,13 +143,12 @@ static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
 		len = 0;
 	else if (to + len > mtd->size)
 		len = mtd->size - to;
-	return part->master->panic_write (part->master, to + part->offset,
+	return part->master->panic_write(part->master, to + part->offset,
 					len, retlen, buf);
 }
-#endif
 
 static int part_write_oob(struct mtd_info *mtd, loff_t to,
-			  struct mtd_oob_ops *ops)
+		struct mtd_oob_ops *ops)
 {
 	struct mtd_part *part = PART(mtd);
@@ -182,33 +162,22 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
 	return part->master->write_oob(part->master, to + part->offset, ops);
 }
 
-static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, u_char *buf)
+static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return part->master->write_user_prot_reg (part->master, from,
+	return part->master->write_user_prot_reg(part->master, from,
 					len, retlen, buf);
 }
 
-static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
-{
-	struct mtd_part *part = PART(mtd);
-	return part->master->lock_user_prot_reg(part->master, from, len);
-}
-
-#ifdef MTD_LINUX
-static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
-			unsigned long count, loff_t to, size_t *retlen)
+static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	return part->master->writev (part->master, vecs, count,
-					to + part->offset, retlen);
+	return part->master->lock_user_prot_reg(part->master, from, len);
 }
-#endif
 
-static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
+static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct mtd_part *part = PART(mtd);
 	int ret;
@@ -219,7 +188,7 @@ static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
 	instr->addr += part->offset;
 	ret = part->master->erase(part->master, instr);
 	if (ret) {
-		if (instr->fail_addr != 0xffffffff)
+		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
 		instr->addr -= part->offset;
 	}
@@ -231,19 +200,15 @@ void mtd_erase_callback(struct erase_info *instr)
 	if (instr->mtd->erase == part_erase) {
 		struct mtd_part *part = PART(instr->mtd);
 
-		if (instr->fail_addr != 0xffffffff)
+		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
 		instr->addr -= part->offset;
 	}
 	if (instr->callback)
 		instr->callback(instr);
 }
-#ifdef MTD_LINUX
 EXPORT_SYMBOL_GPL(mtd_erase_callback);
-#endif
 
-#ifdef MTD_LINUX
-static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
 	if ((len + ofs) > mtd->size)
@@ -251,14 +216,13 @@ static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
 	return part->master->lock(part->master, ofs + part->offset, len);
 }
 
-static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
 	if ((len + ofs) > mtd->size)
		return -EINVAL;
 	return part->master->unlock(part->master, ofs + part->offset, len);
 }
-#endif
 
 static void part_sync(struct mtd_info *mtd)
 {
@@ -266,7 +230,6 @@ static void part_sync(struct mtd_info *mtd)
 	part->master->sync(part->master);
 }
 
-#ifdef MTD_LINUX
 static int part_suspend(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
@@ -278,9 +241,8 @@ static void part_resume(struct mtd_info *mtd)
 	struct mtd_part *part = PART(mtd);
 	part->master->resume(part->master);
 }
-#endif
 
-static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
+static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct mtd_part *part = PART(mtd);
 	if (ofs >= mtd->size)
@@ -289,7 +251,7 @@ static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
 	return part->master->block_isbad(part->master, ofs);
 }
 
-static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
+static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct mtd_part *part = PART(mtd);
 	int res;
@@ -300,10 +262,8 @@ static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
 		return -EINVAL;
 	ofs += part->offset;
 	res = part->master->block_markbad(part->master, ofs);
-#ifdef MTD_LINUX
 	if (!res)
 		mtd->ecc_stats.badblocks++;
-#endif
 	return res;
 }
@@ -314,31 +274,193 @@ static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
 int del_mtd_partitions(struct mtd_info *master)
 {
-	struct list_head *node;
-	struct mtd_part *slave;
+	struct mtd_part *slave, *next;
 
-	for (node = mtd_partitions.next;
-	     node != &mtd_partitions;
-	     node = node->next) {
-		slave = list_entry(node, struct mtd_part, list);
+	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
 		if (slave->master == master) {
-			struct list_head *prev = node->prev;
-			__list_del(prev, node->next);
-			if(slave->registered)
+			list_del(&slave->list);
+			if (slave->registered)
 				del_mtd_device(&slave->mtd);
 			kfree(slave);
-			node = prev;
 		}
-	}
 
 	return 0;
 }
+
+static struct mtd_part *add_one_partition(struct mtd_info *master,
+		const struct mtd_partition *part, int partno,
+		uint64_t cur_offset)
+{
+	struct mtd_part *slave;
+
+	/* allocate the partition structure */
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave) {
+		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
+			master->name);
+		del_mtd_partitions(master);
+		return NULL;
+	}
+	list_add(&slave->list, &mtd_partitions);
+
+	/* set up the MTD object for this partition */
+	slave->mtd.type = master->type;
+	slave->mtd.flags = master->flags & ~part->mask_flags;
+	slave->mtd.size = part->size;
+	slave->mtd.writesize = master->writesize;
+	slave->mtd.oobsize = master->oobsize;
+	slave->mtd.oobavail = master->oobavail;
+	slave->mtd.subpage_sft = master->subpage_sft;
+
+	slave->mtd.name = part->name;
+	slave->mtd.owner = master->owner;
+
+	slave->mtd.read = part_read;
+	slave->mtd.write = part_write;
+
+	if (master->panic_write)
+		slave->mtd.panic_write = part_panic_write;
+
+	if (master->read_oob)
+		slave->mtd.read_oob = part_read_oob;
+	if (master->write_oob)
+		slave->mtd.write_oob = part_write_oob;
+	if (master->read_user_prot_reg)
+		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+	if (master->read_fact_prot_reg)
+		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+	if (master->write_user_prot_reg)
+		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+	if (master->lock_user_prot_reg)
+		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+	if (master->get_user_prot_info)
+		slave->mtd.get_user_prot_info = part_get_user_prot_info;
+	if (master->get_fact_prot_info)
+		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+	if (master->sync)
+		slave->mtd.sync = part_sync;
+	if (!partno && master->suspend && master->resume) {
+		slave->mtd.suspend = part_suspend;
+		slave->mtd.resume = part_resume;
+	}
+	if (master->lock)
+		slave->mtd.lock = part_lock;
+	if (master->unlock)
+		slave->mtd.unlock = part_unlock;
+	if (master->block_isbad)
+		slave->mtd.block_isbad = part_block_isbad;
+	if (master->block_markbad)
+		slave->mtd.block_markbad = part_block_markbad;
+	slave->mtd.erase = part_erase;
+	slave->master = master;
+	slave->offset = part->offset;
+	slave->index = partno;
+
+	if (slave->offset == MTDPART_OFS_APPEND)
+		slave->offset = cur_offset;
+	if (slave->offset == MTDPART_OFS_NXTBLK) {
+		slave->offset = cur_offset;
+		if (mtd_mod_by_eb(cur_offset, master) != 0) {
+			/* Round up to next erasesize */
+			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+			printk(KERN_NOTICE "Moving partition %d: "
+			       "0x%012llx -> 0x%012llx\n", partno,
+			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+		}
+	}
+	if (slave->mtd.size == MTDPART_SIZ_FULL)
+		slave->mtd.size = master->size - slave->offset;
+
+	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+
+	/* let's do some sanity checks */
+	if (slave->offset >= master->size) {
+		/* let's register it anyway to preserve ordering */
+		slave->offset = 0;
+		slave->mtd.size = 0;
+		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
+			part->name);
+		goto out_register;
+	}
+	if (slave->offset + slave->mtd.size > master->size) {
+		slave->mtd.size = master->size - slave->offset;
+		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+			part->name, master->name, (unsigned long long)slave->mtd.size);
+	}
+	if (master->numeraseregions > 1) {
+		/* Deal with variable erase size stuff */
+		int i, max = master->numeraseregions;
+		u64 end = slave->offset + slave->mtd.size;
+		struct mtd_erase_region_info *regions = master->eraseregions;
+
+		/* Find the first erase regions which is part of this
+		 * partition. */
+		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+			;
+		/* The loop searched for the region _behind_ the first one */
+		i--;
+
+		/* Pick biggest erasesize */
+		for (; i < max && regions[i].offset < end; i++) {
+			if (slave->mtd.erasesize < regions[i].erasesize) {
+				slave->mtd.erasesize = regions[i].erasesize;
+			}
+		}
+		BUG_ON(slave->mtd.erasesize == 0);
+	} else {
+		/* Single erase size */
+		slave->mtd.erasesize = master->erasesize;
+	}
+
+	if ((slave->mtd.flags & MTD_WRITEABLE) &&
+	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+		/* Doesn't start on a boundary of major erase size */
+		/* FIXME: Let it be writable if it is on a boundary of
+		 * _minor_ erase size though */
+		slave->mtd.flags &= ~MTD_WRITEABLE;
+		printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+			part->name);
+	}
+	if ((slave->mtd.flags & MTD_WRITEABLE) &&
+	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+		slave->mtd.flags &= ~MTD_WRITEABLE;
+		printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+			part->name);
+	}
+
+	slave->mtd.ecclayout = master->ecclayout;
+	if (master->block_isbad) {
+		uint64_t offs = 0;
+
+		while (offs < slave->mtd.size) {
+			if (master->block_isbad(master,
+						offs + slave->offset))
+				slave->mtd.ecc_stats.badblocks++;
+			offs += slave->mtd.erasesize;
+		}
+	}
+
+out_register:
+	if (part->mtdp) {
+		/* store the object pointer (caller may or may not register it) */
+		*part->mtdp = &slave->mtd;
+		slave->registered = 0;
+	} else {
+		/* register our partition */
+		add_mtd_device(&slave->mtd);
+		slave->registered = 1;
+	}
+	return slave;
+}
+
 /*
  * This function, given a master MTD object and a partition table, creates
  * and registers slave MTD objects which are bound to the master according to
  * the partition definitions.
- * (Q: should we register the master MTD object as well?)
+ *
+ * We don't register the master, or expect the caller to have done so,
+ * for reasons of data integrity.
  */
 
 int add_mtd_partitions(struct mtd_info *master,
@@ -346,7 +468,7 @@ int add_mtd_partitions(struct mtd_info *master,
 		       int nbparts)
 {
 	struct mtd_part *slave;
-	u_int32_t cur_offset = 0;
+	uint64_t cur_offset = 0;
 	int i;
 
 	/*
@@ -357,184 +479,14 @@
 	if (mtd_partitions.next == NULL)
 		INIT_LIST_HEAD(&mtd_partitions);
 
-	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
 
 	for (i = 0; i < nbparts; i++) {
-		/* allocate the partition structure */
-		slave = kzalloc (sizeof(*slave), GFP_KERNEL);
-		if (!slave) {
-			printk ("memory allocation error while creating partitions for \"%s\"\n",
-				master->name);
-			del_mtd_partitions(master);
+		slave = add_one_partition(master, parts + i, i, cur_offset);
+		if (!slave)
 			return -ENOMEM;
-		}
-		list_add(&slave->list, &mtd_partitions);
-
-		/* set up the MTD object for this partition */
-		slave->mtd.type = master->type;
-		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
-		slave->mtd.size = parts[i].size;
-		slave->mtd.writesize = master->writesize;
-		slave->mtd.oobsize = master->oobsize;
-		slave->mtd.oobavail = master->oobavail;
-		slave->mtd.subpage_sft = master->subpage_sft;
-
-		slave->mtd.name = parts[i].name;
-		slave->mtd.owner = master->owner;
-
-		slave->mtd.read = part_read;
-		slave->mtd.write = part_write;
-
-#ifdef MTD_LINUX
-		if (master->panic_write)
-			slave->mtd.panic_write = part_panic_write;
-
-		if (master->point && master->unpoint) {
-			slave->mtd.point = part_point;
-			slave->mtd.unpoint = part_unpoint;
-		}
-#endif
-
-		if (master->read_oob)
-			slave->mtd.read_oob = part_read_oob;
-		if (master->write_oob)
-			slave->mtd.write_oob = part_write_oob;
-		if (master->read_user_prot_reg)
-			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
-		if (master->read_fact_prot_reg)
-			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
-		if (master->write_user_prot_reg)
-			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
-		if (master->lock_user_prot_reg)
-			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
-		if (master->get_user_prot_info)
-			slave->mtd.get_user_prot_info = part_get_user_prot_info;
-		if (master->get_fact_prot_info)
-			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
-		if (master->sync)
-			slave->mtd.sync = part_sync;
-
-#ifdef MTD_LINUX
-		if (!i && master->suspend && master->resume) {
-			slave->mtd.suspend = part_suspend;
-			slave->mtd.resume = part_resume;
-		}
-		if (master->writev)
-			slave->mtd.writev = part_writev;
-		if (master->lock)
-			slave->mtd.lock = part_lock;
-		if (master->unlock)
-			slave->mtd.unlock = part_unlock;
-#endif
-
-		if (master->block_isbad)
-			slave->mtd.block_isbad = part_block_isbad;
-		if (master->block_markbad)
-			slave->mtd.block_markbad = part_block_markbad;
-
-		slave->mtd.erase = part_erase;
-		slave->master = master;
-		slave->offset = parts[i].offset;
-		slave->index = i;
-
-		if (slave->offset == MTDPART_OFS_APPEND)
-			slave->offset = cur_offset;
-		if (slave->offset == MTDPART_OFS_NXTBLK) {
-			slave->offset = cur_offset;
-			if ((cur_offset % master->erasesize) != 0) {
-				/* Round up to next erasesize */
-				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
-				printk(KERN_NOTICE "Moving partition %d: "
-				       "0x%08x -> 0x%08x\n", i,
-				       cur_offset, slave->offset);
-			}
-		}
-		if (slave->mtd.size == MTDPART_SIZ_FULL)
-			slave->mtd.size = master->size - slave->offset;
 		cur_offset = slave->offset + slave->mtd.size;
-
-		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
-			slave->offset + slave->mtd.size, slave->mtd.name);
-
-		/* let's do some sanity checks */
-		if (slave->offset >= master->size) {
-			/* let's register it anyway to preserve ordering */
-			slave->offset = 0;
-			slave->mtd.size = 0;
-			printk("mtd: partition \"%s\" is out of reach -- disabled\n",
-				parts[i].name);
-		}
-		if (slave->offset + slave->mtd.size > master->size) {
-			slave->mtd.size = master->size - slave->offset;
-			printk("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
-				parts[i].name, master->name, slave->mtd.size);
-		}
-
-		if (master->numeraseregions > 1) {
-			/* Deal with variable erase size stuff */
-			int i;
-			struct mtd_erase_region_info *regions = master->eraseregions;
-
-			/* Find the first erase regions which is part of this partition. */
-			for (i = 0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
-				;
-
-			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
-				if (slave->mtd.erasesize < regions[i].erasesize) {
-					slave->mtd.erasesize = regions[i].erasesize;
-				}
-			}
-		} else {
-			/* Single erase size */
-			slave->mtd.erasesize = master->erasesize;
-		}
-
-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
-		    (slave->offset % slave->mtd.erasesize)) {
-			/* Doesn't start on a boundary of major erase size */
-			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
-			slave->mtd.flags &= ~MTD_WRITEABLE;
-			printk("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
-				parts[i].name);
-		}
-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
-		    (slave->mtd.size % slave->mtd.erasesize)) {
-			slave->mtd.flags &= ~MTD_WRITEABLE;
-			printk("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
-				parts[i].name);
-		}
-
-		slave->mtd.ecclayout = master->ecclayout;
-		if (master->block_isbad) {
-			uint32_t offs = 0;
-
-			while (offs < slave->mtd.size) {
-				if (master->block_isbad(master,
-							offs + slave->offset))
-					slave->mtd.ecc_stats.badblocks++;
-				offs += slave->mtd.erasesize;
-			}
-		}
-
-#ifdef MTD_LINUX
-		if (parts[i].mtdp) {
-			/* store the object pointer
-			 * (caller may or may not register it */
-			*parts[i].mtdp = &slave->mtd;
-			slave->registered = 0;
-		} else {
-			/* register our partition */
-			add_mtd_device(&slave->mtd);
-			slave->registered = 1;
-		}
-#else
-		/* register our partition */
-		add_mtd_device(&slave->mtd);
-		slave->registered = 1;
-#endif
 	}
 
 	return 0;
 }
 
-#ifdef MTD_LINUX
 EXPORT_SYMBOL(add_mtd_partitions);
 EXPORT_SYMBOL(del_mtd_partitions);
-#endif
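
For context, the reworked add_mtd_partitions()/add_one_partition() pair is normally driven by a board or map driver that hands in a static partition table. The sketch below is not part of the patch; the device layout, the sizes and the board_flash_init()/board_flash_exit() wrappers are made up for illustration, and "flash_mtd" is assumed to be the master mtd_info obtained from the chip probe. Only the mtd_partition fields, the MTDPART_OFS_APPEND / MTDPART_OFS_NXTBLK / MTDPART_SIZ_FULL conventions and the add_mtd_partitions()/del_mtd_partitions() entry points are taken from the code above.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical layout: a read-only bootloader, a fixed-size kernel area,
 * and a rootfs that takes whatever space is left on the chip. */
static struct mtd_partition board_parts[] = {
	{
		.name       = "bootloader",
		.offset     = 0,
		.size       = 256 * 1024,
		.mask_flags = MTD_WRITEABLE,	/* masked off: slave becomes read-only */
	}, {
		.name       = "kernel",
		.offset     = MTDPART_OFS_APPEND,	/* directly after the previous partition */
		.size       = 4 * 1024 * 1024,
	}, {
		.name       = "rootfs",
		.offset     = MTDPART_OFS_NXTBLK,	/* rounded up to the next erase block */
		.size       = MTDPART_SIZ_FULL,		/* rest of the device */
	},
};

static int board_flash_init(struct mtd_info *flash_mtd)
{
	/* Creates one slave mtd_info per table entry and registers it
	 * (unless the entry supplies a "mtdp" pointer, see out_register). */
	return add_mtd_partitions(flash_mtd, board_parts, ARRAY_SIZE(board_parts));
}

static void board_flash_exit(struct mtd_info *flash_mtd)
{
	/* Unregisters and frees every slave created for this master. */
	del_mtd_partitions(flash_mtd);
}

Note how MTD_WRITEABLE in mask_flags interacts with the setup code above: the slave's flags are computed as master->flags & ~part->mask_flags, so listing a flag there removes it from the partition rather than adding it.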