fs/mnemofs: Fix journal log rw issue, read size issue

Fixes the journal log read and write size and overlap issues, along with the read return value issue.

Signed-off-by: Saurav Pal <resyfer.dev@gmail.com>
This commit is contained in:
Saurav Pal 2024-08-17 06:40:07 +00:00 committed by Alan Carvalho de Assis
parent 9d396bf89e
commit daa3168cfb
11 changed files with 1411 additions and 602 deletions

View file

@ -295,7 +295,7 @@ int nand_ram_eraseblock(FAR struct nand_raw_s *raw, off_t block)
for (i = start_page; i < end_page; i++)
{
nand_ram_flash_spare[i].n_erase++;
nand_ram_flash_spare[i].free = 1;
nand_ram_flash_spare[i].free = NAND_RAM_PAGE_FREE;
}
NAND_RAM_LOG("[LOWER %lu | %s] Done\n", nand_ram_ins_i, "eraseblock");
@ -331,12 +331,11 @@ int nand_ram_rawread(FAR struct nand_raw_s *raw, off_t block,
struct nand_ram_data_s *read_page_data;
struct nand_ram_spare_s *read_page_spare;
ret = OK;
read_page = (block << NAND_RAM_LOG_PAGES_PER_BLOCK) + page;
read_page_data = nand_ram_flash_data + read_page;
read_page_spare = nand_ram_flash_spare + read_page;
ret = OK;
nxmutex_lock(&nand_ram_dev_mut);
nand_ram_ins_i++;
@ -362,7 +361,8 @@ int nand_ram_rawread(FAR struct nand_raw_s *raw, off_t block,
}
else
{
memcpy(data, (const void *)read_page_data, NAND_RAM_PAGE_SIZE);
memcpy(data, (const void *)read_page_data->page,
NAND_RAM_PAGE_SIZE);
}
}
@ -407,12 +407,11 @@ int nand_ram_rawwrite(FAR struct nand_raw_s *raw, off_t block,
struct nand_ram_data_s *write_page_data;
struct nand_ram_spare_s *write_page_spare;
ret = OK;
write_page = (block << NAND_RAM_LOG_PAGES_PER_BLOCK) + page;
write_page_data = nand_ram_flash_data + write_page;
write_page_spare = nand_ram_flash_spare + write_page;
ret = OK;
nxmutex_lock(&nand_ram_dev_mut);
nand_ram_ins_i++;
@ -429,16 +428,17 @@ int nand_ram_rawwrite(FAR struct nand_raw_s *raw, off_t block,
}
nand_ram_flash_spare[write_page].n_write++;
nand_ram_flash_spare[write_page].free = NAND_RAM_PAGE_WRITTEN;
memset((void *)write_page_data, 0, NAND_RAM_PAGE_SIZE);
memset((void *)write_page_data->page, 0, NAND_RAM_PAGE_SIZE);
if (data != NULL)
{
memcpy((void *)write_page_data, data, NAND_RAM_PAGE_SIZE);
memcpy((void *)write_page_data->page, data, NAND_RAM_PAGE_SIZE);
}
if (spare != NULL)
{
memcpy((void *)write_page_spare, data, NAND_RAM_PAGE_SIZE);
memcpy((void *)write_page_spare, data, NAND_RAM_SPARE_SIZE);
}
NAND_RAM_LOG("[LOWER %lu | %s] Done\n", nand_ram_ins_i, "rawwrite");

View file

@ -116,7 +116,7 @@ static int mnemofs_close(FAR struct file *filep);
static ssize_t mnemofs_read(FAR struct file *filep, FAR char *buffer,
size_t buflen);
static ssize_t mnemofs_write(FAR struct file *filep, FAR const char *buffer,
size_t buflen);
size_t buflen);
static off_t mnemofs_seek(FAR struct file *filep, off_t offset,
int whence);
static int mnemofs_ioctl(FAR struct file *filep, int cmd,
@ -214,10 +214,6 @@ const struct mountpt_operations g_mnemofs_operations =
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: mnemofs_open
*
@ -393,8 +389,7 @@ static int mnemofs_open(FAR struct file *filep, FAR const char *relpath,
f->com->off = f->com->sz;
}
finfo("[TMP1] %p %p", &sb->of, &f->list);
list_add_tail(&sb->of, &f->list);
list_add_tail(&MFS_OFILES(sb), &f->list);
filep->f_priv = f;
nxmutex_unlock(&MFS_LOCK(sb));
@ -471,23 +466,31 @@ static int mnemofs_close(FAR struct file *filep)
/* Flushing in-memory data to on-flash journal. */
ret = mfs_lru_ctzflush(sb, f->com->path, f->com->depth);
if (predict_false(ret < 0))
{
finfo("Error while flushing file. Ret: %d.", ret);
goto errout_with_lock;
}
finfo("Original refcount is %u.", f->com->refcount);
f->com->refcount--;
if (f->com->refcount == 0)
{
ret = mnemofs_flush(sb);
if (predict_false(ret < 0))
{
finfo("Error while flushing. Ret: %d.", ret);
goto errout_with_lock;
}
kmm_free(f->com->path);
kmm_free(f->com);
finfo("Refcount is 0, open file structure freed.");
finfo("Open file structure freed.");
ret = mnemofs_flush(sb);
if (predict_false(ret < 0))
{
goto errout_with_fcom;
}
}
errout_with_fcom:
list_delete(&f->list);
kmm_free(f);
filep->f_priv = NULL;
@ -524,14 +527,14 @@ errout:
*
* Returned Value:
* 0 - OK
* < 0 - Error
* <= 0 - Bytes read.
*
****************************************************************************/
static ssize_t mnemofs_read(FAR struct file *filep, FAR char *buffer,
size_t buflen)
{
int ret = 0;
ssize_t ret = 0;
FAR struct inode *inode;
FAR struct mfs_sb_s *sb;
FAR struct mfs_ofd_s *f;
@ -567,6 +570,11 @@ static ssize_t mnemofs_read(FAR struct file *filep, FAR char *buffer,
/* Read data in CTZ from the current offset. */
buflen = MIN(buflen, f->com->sz - f->com->off); /* TODO: Need to consider
* if this needs to be
* lower down the chain.
*/
ret = mfs_lru_rdfromoff(sb, f->com->off, f->com->path, f->com->depth,
buffer, buflen);
if (ret < 0)
@ -575,6 +583,8 @@ static ssize_t mnemofs_read(FAR struct file *filep, FAR char *buffer,
goto errout_with_lock;
}
ret = buflen;
/* Update offset. */
f->com->off += buflen;
@ -602,14 +612,14 @@ errout:
*
* Returned Value:
* 0 - OK
* < 0 - Error
* <= 0 - Bytes written.
*
****************************************************************************/
static ssize_t mnemofs_write(FAR struct file *filep, FAR const char *buffer,
size_t buflen)
size_t buflen)
{
int ret = OK;
ssize_t ret = OK;
FAR struct inode *inode;
FAR struct mfs_sb_s *sb;
FAR struct mfs_ofd_s *f;
@ -654,7 +664,8 @@ static ssize_t mnemofs_write(FAR struct file *filep, FAR const char *buffer,
/* Update offset and size. */
f->com->off += buflen;
f->com->sz = MAX(f->com->sz, f->com->off);
f->com->sz = MAX(f->com->sz, f->com->off);
ret = buflen;
finfo("Offset updated to %u and size to %u", f->com->off, f->com->sz);
@ -688,7 +699,7 @@ errout:
*
* Returned Value:
* 0 - OK
* < 0 - Error
* <= 0 - Final position.
*
****************************************************************************/
@ -752,6 +763,7 @@ static off_t mnemofs_seek(FAR struct file *filep, off_t offset, int whence)
}
f->com->off = pos;
ret = pos;
finfo("Final position %u.", pos);
@ -933,7 +945,7 @@ static int mnemofs_sync(FAR struct file *filep)
f = filep->f_priv;
DEBUGASSERT(f != NULL);
ret = mfs_lru_ctzflush(sb, f->com->path, f->com->depth);
ret = mnemofs_flush(sb);
nxmutex_unlock(&MFS_LOCK(sb));
finfo("Lock released.");
@ -1014,7 +1026,8 @@ static int mnemofs_dup(FAR const struct file *oldp, FAR struct file *newp)
/* Add the new upper half to the list of open fds. */
list_add_tail(&sb->of, &nf->list);
list_add_tail(&MFS_OFILES(sb), &nf->list);
newp->f_priv = nf;
finfo("New file descriptor added to the end of the list of open files.");
nxmutex_unlock(&MFS_LOCK(sb));
@ -1139,8 +1152,8 @@ errout:
****************************************************************************/
static int mnemofs_opendir(FAR struct inode *mountpt,
FAR const char *relpath,
FAR struct fs_dirent_s **dir)
FAR const char *relpath,
FAR struct fs_dirent_s **dir)
{
int ret = OK;
int flags;
@ -1287,8 +1300,8 @@ static int mnemofs_closedir(FAR struct inode *mountpt,
****************************************************************************/
static int mnemofs_readdir(FAR struct inode *mountpt,
FAR struct fs_dirent_s *dir,
FAR struct dirent *entry)
FAR struct fs_dirent_s *dir,
FAR struct dirent *entry)
{
int ret = OK;
FAR struct mfs_sb_s *sb;
@ -1396,7 +1409,7 @@ errout:
****************************************************************************/
static int mnemofs_rewinddir(FAR struct inode *mountpt,
FAR struct fs_dirent_s *dir)
FAR struct fs_dirent_s *dir)
{
int ret = OK;
FAR struct mfs_sb_s *sb;
@ -1463,12 +1476,15 @@ errout:
static int mnemofs_bind(FAR struct inode *driver, FAR const void *data,
FAR void** handle)
{
int ret = OK;
bool format = false;
FAR char buf[8];
mfs_t i = 0;
FAR struct mfs_sb_s *sb = NULL;
struct mtd_geometry_s geo;
int ret = OK;
bool format = false;
FAR char buf[8];
mfs_t i = 0;
mfs_t mnblk1;
mfs_t mnblk2;
mfs_t jrnl_blk;
FAR struct mfs_sb_s *sb = NULL;
struct mtd_geometry_s geo;
finfo("Mnemofs bind.");
@ -1516,22 +1532,23 @@ static int mnemofs_bind(FAR struct inode *driver, FAR const void *data,
finfo("Lock acquired.");
sb->drv = driver;
sb->pg_sz = geo.blocksize;
sb->blk_sz = geo.erasesize;
sb->n_blks = geo.neraseblocks;
sb->pg_in_blk = MFS_BLKSZ(sb) / sb->pg_sz;
sb->drv = driver;
sb->pg_sz = geo.blocksize;
sb->blk_sz = geo.erasesize;
sb->n_blks = geo.neraseblocks;
sb->pg_in_blk = MFS_BLKSZ(sb) / sb->pg_sz;
#ifdef CONFIG_MNEMOFS_JOURNAL_NBLKS
sb->j_nblks = CONFIG_MNEMOFS_JOURNAL_NBLKS;
MFS_JRNL(sb).n_blks = CONFIG_MNEMOFS_JOURNAL_NBLKS;
#else
sb->j_nblks = MIN(5, MFS_NBLKS(sb) / 2);
MFS_JRNL(sb).n_blks = MIN(5, MFS_NBLKS(sb) / 2);
#endif
sb->log_blk_sz = log2(MFS_BLKSZ(sb));
sb->log_pg_sz = log2(sb->pg_sz);
sb->log_pg_in_blk = log2(sb->pg_in_blk);
sb->log_n_blks = log2(MFS_NBLKS(sb));
sb->log_blk_sz = log2(MFS_BLKSZ(sb));
sb->log_pg_sz = log2(sb->pg_sz);
sb->log_pg_in_blk = log2(sb->pg_in_blk);
sb->log_n_blks = log2(MFS_NBLKS(sb));
MFS_FLUSH(sb) = false;
list_initialize(&sb->of);
list_initialize(&MFS_OFILES(sb));
sb->rw_buf = kmm_zalloc(MFS_PGSZ(sb));
if (predict_false(sb->rw_buf == NULL))
@ -1542,8 +1559,6 @@ static int mnemofs_bind(FAR struct inode *driver, FAR const void *data,
/* TODO: Print the super block in Block 0. */
srand(time(NULL));
mfs_ba_init(sb);
mfs_lru_init(sb);
if (!strncmp(data, "autoformat", 11))
{
@ -1575,6 +1590,9 @@ static int mnemofs_bind(FAR struct inode *driver, FAR const void *data,
else
{
finfo("Device already formatted.\n");
mfs_ba_init(sb);
mfs_lru_init(sb);
}
}
@ -1587,8 +1605,19 @@ static int mnemofs_bind(FAR struct inode *driver, FAR const void *data,
finfo("Force format.\n");
}
ret = mfs_jrnl_fmt(sb, 0, 0);
if (ret != OK)
mfs_ba_fmt(sb);
mfs_lru_init(sb);
mnblk1 = 0;
mnblk2 = 0;
ret = mfs_jrnl_fmt(sb, &mnblk1, &mnblk2, &jrnl_blk);
if (predict_false(ret < 0))
{
goto errout_with_sb;
}
ret = mfs_mn_fmt(sb, mnblk1, mnblk2, jrnl_blk);
if (predict_false(ret < 0))
{
goto errout_with_sb;
}
@ -1672,7 +1701,7 @@ static int mnemofs_unbind(FAR void *handle, FAR struct inode **driver,
static int mnemofs_statfs(FAR struct inode *mountpt, FAR struct statfs *buf)
{
int ret = OK;
int ret = OK;
FAR struct mfs_sb_s *sb;
finfo("Mnemofs statfs.");
@ -1728,9 +1757,9 @@ errout:
static int mnemofs_unlink(FAR struct inode *mountpt, FAR const char *relpath)
{
int ret = OK;
int ret_flags;
mfs_t depth;
int ret = OK;
int ret_flags;
mfs_t depth;
FAR struct mfs_sb_s *sb;
FAR struct mfs_path_s *path;
@ -1755,7 +1784,7 @@ static int mnemofs_unlink(FAR struct inode *mountpt, FAR const char *relpath)
goto errout_with_lock;
}
mfs_pitr_rm(sb, path, depth);
mfs_pitr_rm(sb, path, depth, true);
mfs_free_patharr(path);
@ -1790,7 +1819,7 @@ errout:
****************************************************************************/
static int mnemofs_mkdir(FAR struct inode *mountpt, FAR const char *relpath,
mode_t mode)
mode_t mode)
{
int ret = OK;
int flags;
@ -1941,7 +1970,7 @@ static int mnemofs_rmdir(FAR struct inode *mountpt, FAR const char *relpath)
mfs_pitr_free(&pitr);
mfs_pitr_rm(sb, path, depth);
mfs_pitr_rm(sb, path, depth, true);
errout_with_pitr:
mfs_free_patharr(path);
@ -2061,7 +2090,7 @@ static int mnemofs_rename(FAR struct inode *mountpt,
mfs_pitr_reset(&npitr);
mfs_pitr_rm(sb, npath, ndepth);
mfs_pitr_rm(sb, npath, ndepth, false);
}
mfs_pitr_adv_tochild(&npitr, npath);
@ -2133,6 +2162,9 @@ static int mnemofs_stat(FAR struct inode *mountpt, FAR const char *relpath,
finfo("Lock acquired.");
finfo("Master node: Root (%u, %u), Size %u", MFS_MN(sb).root_ctz.idx_e,
MFS_MN(sb).root_ctz.pg_e, MFS_MN(sb).root_sz);
ret_flags = mfs_get_patharr(sb, relpath, &path, &depth);
if ((ret_flags & MFS_EXIST) == 0)
{
@ -2187,3 +2219,59 @@ errout:
finfo("Ret %d", ret);
return ret;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: mnemofs_flush
 *
 * Description:
 *   Flush in-memory FS state to the device.  Each pass flushes the LRU if
 *   it is non-empty and flushes the journal if it is non-empty and has
 *   grown to at least MFS_JRNL_LIM(sb) blocks.  Passes repeat until one
 *   full pass causes no change.
 *
 * Input Parameters:
 *   sb - Superblock instance of the device.
 *
 * Returned Value:
 *   0   - OK
 *   < 0 - Error code propagated from the LRU or journal flush.
 *
 ****************************************************************************/

int mnemofs_flush(FAR struct mfs_sb_s *sb)
{
int ret = OK;
bool change;
/* Empty the LRU, and maybe the journal as well. */
finfo("Flush operation started.");
for (; ; )
{
change = false;
if (!mfs_lru_isempty(sb))
{
finfo("LRU needs to be flushed.");
change = true;
ret = mfs_lru_flush(sb);
if (predict_false(ret < 0))
{
goto errout;
}
}
/* The journal is flushed only after it reaches its block limit.
 * NOTE(review): presumably flushing one of LRU/journal can refill the
 * other, hence the convergence loop -- confirm against their
 * implementations.
 */
if (!mfs_jrnl_isempty(sb) &&
MFS_JRNL(sb).log_cblkidx >= MFS_JRNL_LIM(sb))
{
finfo("Journal needs to be flushed.");
change = true;
ret = mfs_jrnl_flush(sb);
if (predict_false(ret < 0))
{
goto errout;
}
}
/* Stop once a full pass made no change. */
if (!change)
{
break;
}
finfo("Finished Iteration.");
}
errout:
return ret;
}
/* TODO: Superblock still doesn't exist. Plus bug fixes. */

View file

@ -86,14 +86,17 @@
#define MFS_RWBUF(sb) ((sb)->rw_buf)
#define MFS_BA(sb) ((sb)->ba_state)
#define MFS_NBLKS(sb) ((sb)->n_blks)
#define MFS_OFILES(sb) ((sb)->of)
#define MFS_FLUSH(sb) ((sb)->flush)
#define MFS_NPGS(sb) (MFS_NBLKS(sb) * MFS_PGINBLK(sb))
#define MFS_HASHSZ 16
#define MFS_CTZ_SZ(l) ((l)->sz)
#define MFS_DIRENTSZ(dirent) ((mfs_t) (sizeof(struct mfs_dirent_s) \
+ (dirent)->namelen))
#define MFS_DIRENTSZ(dirent) ((2 * 2) + 4 + (16 * 3) + 8 + 1 \
+ (dirent)->namelen)
#define MFS_JRNL_LIM(sb) (MFS_JRNL(sb).n_blks) /* TODO: 50-75% */
#define MFS_JRNL_LIM(sb) (MFS_JRNL(sb).n_blks / 2)
#define MFS_TRAVERSE_INITSZ 8
/****************************************************************************
* Public Types
@ -149,7 +152,7 @@ struct mfs_mn_s
{
mfs_t pg; /* Only mblk1's pg will be used here. */
mfs_t jrnl_blk; /* Start of journal. */
mfs_t mblk_idx;
mfs_t mblk_idx; /* Index for next MN entry inside blk. */
struct mfs_ctz_s root_ctz;
mfs_t root_sz;
struct timespec ts;
@ -170,7 +173,7 @@ struct mfs_jrnl_state_s
mfs_t log_sblkidx; /* First jrnl blk index. TODO: jrnlarr > 1 blk. */
mfs_t jrnlarr_pg;
mfs_t jrnlarr_pgoff;
uint16_t n_blks;
uint16_t n_blks; /* TODO: Does not include the master node. */
};
struct mfs_sb_s
@ -185,15 +188,14 @@ struct mfs_sb_s
uint8_t log_blk_sz;
mfs_t n_blks;
uint8_t log_n_blks;
mfs_t n_lru;
uint16_t pg_in_blk;
uint8_t log_pg_in_blk;
uint8_t j_nblks;
struct mfs_mn_s mn; /* Master Node */
struct mfs_jrnl_state_s j_state; /* Journal State */
struct mfs_ba_state_s ba_state; /* Block Allocator State */
struct list_node lru;
struct list_node of; /* open files. */
bool flush;
};
/* This is for *dir VFS methods. */
@ -417,6 +419,10 @@ static inline mfs_t mfs_popcnt(mfs_t x)
* Public Function Prototypes
****************************************************************************/
/* mnemofs.c */
int mnemofs_flush(FAR struct mfs_sb_s *sb);
/* mnemofs_journal.c */
/****************************************************************************
@ -459,7 +465,8 @@ int mfs_jrnl_init(FAR struct mfs_sb_s * const sb, mfs_t blk);
*
****************************************************************************/
int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, mfs_t blk1, mfs_t blk2);
int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, mfs_t *blk1, mfs_t *blk2,
FAR mfs_t *jrnl_blk);
/****************************************************************************
* Name: mfs_jrnl_free
@ -551,7 +558,7 @@ int mfs_jrnl_updatedinfo(FAR const struct mfs_sb_s * const sb,
****************************************************************************/
int mfs_jrnl_wrlog(FAR struct mfs_sb_s * const sb,
const struct mfs_node_s node,
FAR const struct mfs_node_s *node,
const struct mfs_ctz_s loc_new, const mfs_t sz_new);
/****************************************************************************
@ -572,8 +579,41 @@ int mfs_jrnl_wrlog(FAR struct mfs_sb_s * const sb,
int mfs_jrnl_flush(FAR struct mfs_sb_s * const sb);
/****************************************************************************
* Name: mfs_jrnl_isempty
*
* Description:
* Check if the journal is empty.
*
* Input Parameters:
* sb - Superblock instance of the device.
*
* Returned Value:
* True if the journal is empty, false otherwise.
*
****************************************************************************/
bool mfs_jrnl_isempty(FAR const struct mfs_sb_s * const sb);
/* mnemofs_blkalloc.c */
/****************************************************************************
* Name: mfs_ba_init
*
* Description:
* Formats and initializes the block allocator.
*
* Input Parameters:
* sb - Superblock instance of the device.
*
* Returned Value:
* 0 - OK
* -ENOMEM - No memory left.
*
****************************************************************************/
int mfs_ba_fmt(FAR struct mfs_sb_s * const sb);
/****************************************************************************
* Name: mfs_ba_init
*
@ -1172,7 +1212,7 @@ int mfs_ctz_rdfromoff(FAR const struct mfs_sb_s * const sb,
mfs_t len, FAR char * buf);
/****************************************************************************
* Name: mfs_ctz_wrtnode_new
* Name: mfs_ctz_wrtnode
*
* Description:
* Write an LRU node to the flash. It also adds a log of it to the journal.
@ -1192,7 +1232,37 @@ int mfs_ctz_rdfromoff(FAR const struct mfs_sb_s * const sb,
****************************************************************************/
int mfs_ctz_wrtnode(FAR struct mfs_sb_s * const sb,
const struct mfs_node_s * const node);
FAR const struct mfs_node_s * const node,
FAR struct mfs_ctz_s *new_loc);
/****************************************************************************
* Name: mfs_ctz_travel
*
* Description:
* From CTZ block at page `pg_src` and index `idx_src`, give the page
* number of index `idx_dest`.
*
* The source is preferably the last CTZ block in the CTZ list, but it can
* realistically be any CTZ block in the CTZ list whose position is known.
* However, `idx_dest <= idx_src` has to be followed. Takes O(log(n))
* complexity to travel.
*
* Input Parameters:
* sb - Superblock instance of the device.
* idx_src - Index of the source ctz block.
* pg_src - Page number of the source ctz block.
* idx_dest - Index of the destination ctz block.
*
* Returned Value:
* The page number corresponding to `idx_dest`.
*
* Assumptions/Limitations:
* `idx_dest <= idx_src`.
*
****************************************************************************/
mfs_t mfs_ctz_travel(FAR const struct mfs_sb_s * const sb,
mfs_t idx_src, mfs_t pg_src, mfs_t idx_dest);
/* mnemofs_lru.c */
@ -1325,7 +1395,10 @@ int mfs_lru_updatedinfo(FAR const struct mfs_sb_s * const sb,
int mfs_lru_updatectz(FAR struct mfs_sb_s * sb,
FAR struct mfs_path_s * const path, const mfs_t depth,
const struct mfs_ctz_s new_ctz);
const struct mfs_ctz_s new_ctz, mfs_t new_sz);
bool mfs_lru_isempty(FAR struct mfs_sb_s * const sb);
int mfs_lru_flush(FAR struct mfs_sb_s * const sb);
/* mnemofs_master.c */
@ -1369,7 +1442,8 @@ int mfs_mn_init(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk);
*
****************************************************************************/
int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk);
int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t blk1,
const mfs_t blk2, const mfs_t jrnl_blk);
/****************************************************************************
* Name: mfs_mn_move
@ -1394,6 +1468,10 @@ int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk);
int mfs_mn_move(FAR struct mfs_sb_s * const sb, struct mfs_ctz_s root,
const mfs_t root_sz);
int mfs_mn_sync(FAR struct mfs_sb_s *sb,
FAR struct mfs_path_s * const new_loc,
const mfs_t blk1, const mfs_t blk2, const mfs_t jrnl_blk);
/* mnemofs_fsobj.c */
/****************************************************************************
@ -1817,7 +1895,10 @@ int mfs_pitr_rmdirent(FAR struct mfs_sb_s * const sb,
int mfs_pitr_rm(FAR struct mfs_sb_s * const sb,
FAR struct mfs_path_s * const path,
const mfs_t depth);
const mfs_t depth, bool rm_child);
int mfs_pitr_traversefs(FAR struct mfs_sb_s * sb, const struct mfs_ctz_s ctz,
int type);
#undef EXTERN
#ifdef __cplusplus

View file

@ -236,11 +236,11 @@ static int is_pg_writeable(FAR struct mfs_sb_s * const sb, mfs_t pg,
static int is_blk_writeable(FAR struct mfs_sb_s * const sb, const mfs_t blk)
{
mfs_t idx;
int blkbad_status;
mfs_t i;
mfs_t pg = MFS_BLK2PG(sb, blk);
mfs_t idx;
uint8_t off;
mfs_t pg = MFS_BLK2PG(sb, blk);
mfs_t i;
int blkbad_status;
/* Bad block check. */
@ -279,15 +279,15 @@ static int is_blk_writeable(FAR struct mfs_sb_s * const sb, const mfs_t blk)
* Public Functions
****************************************************************************/
int mfs_ba_init(FAR struct mfs_sb_s * const sb)
int mfs_ba_fmt(FAR struct mfs_sb_s * const sb)
{
int ret = OK;
uint8_t log;
/* We need at least 5 blocks, as one is occupied by superblock, at least
* one for the journal, 2 for journal's master blocks, and at least one for
* actual data.
*/
/* We need at least 5 blocks, as one is occupied by superblock, at least
* one for the journal, 2 for journal's master blocks, and at least one for
* actual data.
*/
if (MFS_NBLKS(sb) < 5)
{
@ -326,8 +326,6 @@ int mfs_ba_init(FAR struct mfs_sb_s * const sb)
goto errout_with_k_del;
}
/* TODO: Fill MFS_BA(sb).bmap_upgs after tree traversal. */
finfo("mnemofs: Block Allocator initialized, starting at page %d.\n",
MFS_BLK2PG(sb, MFS_BA(sb).s_blk));
return ret;
@ -339,6 +337,33 @@ errout:
return ret;
}
/****************************************************************************
 * Name: mfs_ba_init
 *
 * Description:
 *   Initialize the block allocator: format its state with mfs_ba_fmt and
 *   then traverse the FS tree from the master node's root to account for
 *   pages already in use.
 *
 * Input Parameters:
 *   sb - Superblock instance of the device.
 *
 * Returned Value:
 *   0   - OK
 *   < 0 - Error from formatting or from the FS-tree traversal.
 *
 ****************************************************************************/

int mfs_ba_init(FAR struct mfs_sb_s * const sb)
{
  /* TODO: Ensure journal and master node are initialized before this. */

  int ret = OK;

  ret = mfs_ba_fmt(sb);
  if (predict_false(ret < 0))
    {
      goto errout;
    }

  /* Traverse the FS tree. */

  ret = mfs_pitr_traversefs(sb, MFS_MN(sb).root_ctz, MFS_ISDIR);
  if (predict_false(ret < 0))
    {
      goto errout_with_ba;
    }

  /* Success: do NOT fall through into the cleanup path below, which
   * would free the block allocator state that was just initialized.
   */

  return ret;

errout_with_ba:
  mfs_ba_free(sb);

errout:
  return ret;
}
void mfs_ba_free(FAR struct mfs_sb_s * const sb)
{
kmm_free(MFS_BA(sb).k_del);

View file

@ -122,8 +122,6 @@ static void ctz_off2loc(FAR const struct mfs_sb_s * const sb, mfs_t off,
FAR mfs_t *idx, FAR mfs_t *pgoff);
static mfs_t ctz_blkdatasz(FAR const struct mfs_sb_s * const sb,
const mfs_t idx);
static mfs_t ctz_travel(FAR const struct mfs_sb_s * const sb, mfs_t idx_src,
mfs_t pg_src, mfs_t idx_dest);
static void ctz_copyidxptrs(FAR const struct mfs_sb_s * const sb,
FAR struct mfs_ctz_s ctz, const mfs_t idx,
FAR char *buf);
@ -183,7 +181,7 @@ static mfs_t ctz_idx_nptrs(const mfs_t idx)
static void ctz_off2loc(FAR const struct mfs_sb_s * const sb, mfs_t off,
FAR mfs_t *idx, FAR mfs_t *pgoff)
{
const mfs_t wb = sizeof(mfs_t);
const mfs_t wb = sizeof(mfs_t);
const mfs_t den = MFS_PGSZ(sb) - 2 * wb;
if (off < den)
@ -234,33 +232,380 @@ static mfs_t ctz_blkdatasz(FAR const struct mfs_sb_s * const sb,
}
/****************************************************************************
* Name: ctz_travel
* Name: ctz_copyidxptrs
*
* Description:
* From CTZ block at page `pg_src` and index `idx_src`, give the page
* number of index `idx_dest`.
* This is used for cases when you want to expand a CTZ list from any point
* in the list. If we want to expand the CTZ list from a particular index,
* say `start_idx`, while keeping all indexes before it untouched, we
* would need to first allocate new blocks on the flash, and then copy
* the pointers to the location.
*
* The source is preferably the last CTZ block in the CTZ list, but it can
* realistically be any CTZ block in the CTZ list whose position is known.
* However, `idx_dest <= idx_src` has to be followed. Takes O(log(n))
* complexity to travel.
* Usage of this function is, the caller needs to first allocate a CTZ
* block (a page on flash), allocate buffer which is the size of a CTZ
* block (a page on flash), and use this method to copy the pointers to the
* buffer, then write the data to the flash.
*
* Input Parameters:
* sb - Superblock instance of the device.
* idx_src - Index of the source ctz block.
* pg_src - Page number of the source ctz block.
* idx_dest - Index of the destination ctz block.
*
* Returned Value:
* The page number corresponding to `idx_dest`.
* sb - Superblock instance of the device.
* ctz - CTZ list to use as a reference.
* idx - Index of the block whose supposed pointers are to be copied.
* buf - Buffer representing the entire CTZ block where pointers are
* copied to.
*
* Assumptions/Limitations:
* `idx_dest <= idx_src`.
* This assumes `idx` is not more than `ctz->idx_e + 1`.
*
****************************************************************************/
static mfs_t ctz_travel(FAR const struct mfs_sb_s * const sb, mfs_t idx_src,
mfs_t pg_src, mfs_t idx_dest)
/* Serialize the skip-list pointers of CTZ block `idx` into the tail of the
 * page buffer `buf`.  Pointers are written back-to-front, ending exactly at
 * buf + MFS_PGSZ(sb); the caller then writes `buf` out as the new block.
 * Assumes `idx` is not more than `ctz.idx_e + 1`.
 */

static void ctz_copyidxptrs(FAR const struct mfs_sb_s * const sb,
FAR struct mfs_ctz_s ctz, const mfs_t idx,
FAR char *buf)
{
mfs_t i;
mfs_t n_ptrs;
mfs_t prev_pg;
mfs_t prev_idx;
if (idx == 0)
{
/* No pointers for first block. */
return;
}
n_ptrs = ctz_idx_nptrs(idx);
if (idx != ctz.idx_e + 1)
{
/* We travel to the second last "known" CTZ block. */
ctz.pg_e = mfs_ctz_travel(sb, ctz.idx_e, ctz.pg_e, idx - 1);
ctz.idx_e = idx - 1;
}
buf += MFS_PGSZ(sb); /* Go to buf + pg_sz */
DEBUGASSERT(idx == ctz.idx_e + 1);
finfo("Copying %u pointers for CTZ (%u, %u) at index %u.", n_ptrs,
ctz.idx_e, ctz.pg_e, idx);
/* Pointer 0 is the immediately preceding block; each further pointer is
 * reached by travelling one more index backwards through the list.
 */
for (i = 0; i < n_ptrs; i++)
{
if (predict_false(i == 0))
{
prev_idx = ctz.idx_e;
prev_pg = ctz.pg_e;
}
else
{
prev_pg = mfs_ctz_travel(sb, prev_idx, prev_pg, prev_idx - 1);
prev_idx--;
}
ctz.idx_e = prev_idx;
/* Do buf + pg_sz - (idx * sizeof(mfs_t)) iteratively. */
buf -= MFS_CTZ_PTRSZ;
mfs_ser_mfs(prev_pg, buf);
finfo("Copied %u page number to %uth pointer.", prev_pg, i);
}
}
/****************************************************************************
* Public Functions
****************************************************************************/
/* Read `len` bytes starting at `data_off` from the CTZ list `ctz` into
 * `buf`.  Reads the list in reverse (last block first), so `buf` is
 * treated as pointing just past the END of the destination region while
 * iterating.  Returns 0 on success, negative on error.
 */

int mfs_ctz_rdfromoff(FAR const struct mfs_sb_s * const sb,
const struct mfs_ctz_s ctz, mfs_t data_off,
mfs_t len, FAR char * buf)
{
int ret = OK;
mfs_t i;
mfs_t rd_sz;
mfs_t cur_pg;
mfs_t cur_idx;
mfs_t cur_pgoff;
mfs_t end_idx;
mfs_t end_pgoff;
mfs_t pg_rd_sz;
finfo("Reading (%u, %u) CTZ from %u offset for %u bytes.", ctz.idx_e,
ctz.pg_e, data_off, len);
/* An all-zero CTZ location means there is nothing to read. */
if (ctz.idx_e == 0 && ctz.pg_e == 0)
{
goto errout;
}
/* cur_* is the (index, in-page offset) of the END of the range;
 * end_* is that of the START of the range.
 */
ctz_off2loc(sb, data_off + len, &cur_idx, &cur_pgoff);
ctz_off2loc(sb, data_off, &end_idx, &end_pgoff);
/* NOTE(review): this assertion reads as "list end is before the read
 * end", which looks inverted -- confirm the intended invariant.
 */
DEBUGASSERT(ctz.idx_e < cur_idx); /* TODO: Need to consider this. For now, there is a temporary fix in read(). */
if (ctz.idx_e < end_idx)
{
goto errout;
}
cur_pg = mfs_ctz_travel(sb, ctz.idx_e, ctz.pg_e, cur_idx);
rd_sz = 0;
if (predict_false(cur_pg == 0))
{
goto errout;
}
/* O(n) read by reading in reverse. */
finfo("Started reading. Current Idx: %u, End Idx: %u.", cur_idx, end_idx);
if (cur_idx != end_idx)
{
for (i = cur_idx; i >= end_idx; i--)
{
finfo("Current index %u, Current Page %u.", i, cur_pg);
if (predict_false(i == cur_idx))
{
/* Last block of the range: only cur_pgoff bytes belong to it. */
pg_rd_sz = cur_pgoff;
ret = mfs_read_page(sb, buf - pg_rd_sz, pg_rd_sz, cur_pg,
0);
cur_pgoff = 0;
}
else if (predict_false(i == end_idx))
{
/* First block of the range: skip the first end_pgoff bytes. */
pg_rd_sz = ctz_blkdatasz(sb, i) - end_pgoff;
ret = mfs_read_page(sb, buf - pg_rd_sz, pg_rd_sz, cur_pg,
end_pgoff);
}
else
{
/* Middle block: read its entire data area. */
pg_rd_sz = ctz_blkdatasz(sb, i);
ret = mfs_read_page(sb, buf - pg_rd_sz, pg_rd_sz, cur_pg,
0);
}
/* NOTE(review): mfs_read_page presumably returns the number of
 * bytes read, with 0 meaning failure -- confirm.
 */
if (predict_false(ret == 0))
{
ret = -EINVAL;
goto errout;
}
buf -= pg_rd_sz;
}
/* NOTE(review): cur_pg is only advanced to the previous block here,
 * after the loop, so every iteration above reads the same page --
 * confirm whether this travel belongs inside the loop (source
 * indentation was lost and may be misleading).
 */
cur_pg = mfs_ctz_travel(sb, cur_idx, cur_pg, cur_idx - 1);
if (predict_false(cur_pg == 0))
{
ret = -EINVAL;
goto errout;
}
}
else
{
/* The whole range lies within a single CTZ block. */
ret = mfs_read_page(sb, buf, len, cur_pg, end_pgoff);
if (predict_false(ret < 0))
{
goto errout;
}
ret = OK;
}
finfo("Reading finished.");
errout:
return ret;
}
/* Write an LRU node's pending deltas to flash as a new CTZ list tail and
 * append a log of the change to the journal.  The new end-of-list location
 * is returned through `new_loc`.  Returns 0 on success, negative on error.
 */

int mfs_ctz_wrtnode(FAR struct mfs_sb_s * const sb,
FAR const struct mfs_node_s * const node,
FAR struct mfs_ctz_s *new_loc)
{
int ret = OK;
bool written = false;
mfs_t prev;
mfs_t rem_sz;
mfs_t new_pg;
mfs_t cur_pg;
mfs_t cur_idx;
mfs_t cur_pgoff;
mfs_t lower;
mfs_t upper;
mfs_t upper_og;
mfs_t lower_upd;
mfs_t upper_upd;
mfs_t del_bytes;
FAR char *buf = NULL;
FAR char *tmp = NULL;
struct mfs_ctz_s ctz;
FAR struct mfs_delta_s *delta;
finfo("Write LRU node %p at depth %u, with %u delta(s) to flash.", node,
node->depth, list_length(&node->delta));
/* Traverse common CTZ blocks. */
ctz_off2loc(sb, node->range_min, &cur_idx, &cur_pgoff);
ctz = node->path[node->depth - 1].ctz;
cur_pg = mfs_ctz_travel(sb, ctz.idx_e, ctz.pg_e, cur_idx);
/* So, till cur_idx - 1, the CTZ blocks are common. */
buf = kmm_zalloc(MFS_PGSZ(sb));
if (predict_false(buf == NULL))
{
ret = -ENOMEM;
goto errout;
}
/* Initially, there might be some offset in cur_idx CTZ blocks that is
* unmodified as well.
*/
finfo("Initial read.");
tmp = buf;
/* NOTE(review): the return value of this initial read is not checked. */
mfs_read_page(sb, tmp, cur_pgoff, cur_pg, 0);
tmp += cur_pgoff;
/* Modifications. */
prev = 0;
rem_sz = node->sz;
lower = node->range_min;
del_bytes = 0;
/* [lower, upper) range. Two pointer approach. Window gets narrower
* for every delete falling inside it.
*/
while (rem_sz > 0)
{
upper = MIN(prev + lower + ctz_blkdatasz(sb, cur_idx), rem_sz);
upper_og = upper;
finfo("Remaining Size %u. Lower %u, Upper %u, Current Offset %u",
rem_sz, lower, upper, tmp - buf);
/* Retrieving original data. */
ret = mfs_ctz_rdfromoff(sb, ctz, lower + del_bytes, upper - lower,
tmp);
if (predict_false(ret < 0))
{
goto errout_with_buf;
}
/* Apply every delta that overlaps the current [lower, upper) window. */
list_for_every_entry(&node->delta, delta, struct mfs_delta_s, list)
{
finfo("Checking delta %p in node %p. Offset %u, bytes %u.", delta,
node, delta->off, delta->n_b);
lower_upd = MAX(lower, delta->off);
upper_upd = MIN(upper, delta->off + delta->n_b);
if (lower_upd >= upper_upd)
{
/* Skip this delta. */
continue;
}
if (delta->upd == NULL)
{
finfo("Node type: Delete");
/* Delete */
del_bytes += upper_upd - lower_upd;
memmove(tmp + (lower_upd - lower), tmp + (upper_upd - lower),
upper - upper_upd);
/* NOTE(review): shrinking by the full upper_upd rather than by
 * the overlap (upper_upd - lower_upd) looks suspicious --
 * confirm intended window arithmetic.
 */
upper -= upper_upd;
}
else
{
finfo("Node type: Update");
/* Update */
memcpy(tmp + (lower_upd - lower),
delta->upd + (lower_upd - delta->off),
upper_upd - lower_upd);
}
}
/* rem_sz check for final write. */
if (upper == upper_og || rem_sz == upper - lower)
{
prev = 0;
/* Time to write a page for new CTZ list. */
new_pg = mfs_ba_getpg(sb);
if (predict_false(new_pg == 0))
{
ret = -ENOSPC;
goto errout_with_buf;
}
/* Fill in the skip-list pointers at the tail of the page, then
 * write the whole page out.
 */
ctz_copyidxptrs(sb, ctz, cur_idx, buf);
ret = mfs_write_page(sb, buf, MFS_PGSZ(sb), new_pg, 0);
/* NOTE(review): mfs_write_page presumably returns bytes written,
 * with 0 meaning failure -- confirm.
 */
if (predict_false(ret == 0))
{
ret = -EINVAL;
goto errout_with_buf;
}
memset(buf, 0, MFS_PGSZ(sb));
tmp = buf;
ctz.idx_e = cur_idx;
ctz.pg_e = new_pg;
cur_idx++;
written = true;
finfo("Written data to page %u.", new_pg);
}
else
{
tmp += upper - lower;
written = false;
}
prev = upper - lower;
rem_sz -= upper - lower;
lower = upper;
}
DEBUGASSERT(written);
/* TODO: Need to verify for cases where the delete extends outside, etc. */
/* Write log. Assumes journal has enough space due to the limit. */
finfo("Writing log.");
*new_loc = ctz;
ret = mfs_jrnl_wrlog(sb, node, ctz, node->sz);
if (predict_false(ret < 0))
{
goto errout_with_buf;
}
errout_with_buf:
kmm_free(buf);
errout:
return ret;
}
mfs_t mfs_ctz_travel(FAR const struct mfs_sb_s * const sb,
mfs_t idx_src, mfs_t pg_src, mfs_t idx_dest)
{
char buf[4];
mfs_t pg;
@ -315,347 +660,3 @@ static mfs_t ctz_travel(FAR const struct mfs_sb_s * const sb, mfs_t idx_src,
return pg;
}
/****************************************************************************
* Name: ctz_copyidxptrs
*
* Description:
* This is used for cases when you want to expand a CTZ list from any point
* in the list. If we want to expand the CTZ list from a particular index,
* say `start_idx`, while keeping all indexes before it untouched, we
* would need to first allocate new blocks on the flash, and then copy
* the pointers to the location.
*
* Usage of this function is, the caller needs to first allocate a CTZ
* block (a page on flash), allocate buffer which is the size of a CTZ
* block (a page on flash), and use this method to copy the pointers to the
* buffer, then write the data to the flash.
*
* Input Parameters:
* sb - Superblock instance of the device.
* ctz - CTZ list to use as a reference.
* idx - Index of the block whose supposed pointers are to be copied.
* buf - Buffer representing the entire CTZ block where pointers are
* copied to.
*
* Assumptions/Limitations:
* This assumes `idx` is not more than `ctz->idx_e + 1`.
*
****************************************************************************/
static void ctz_copyidxptrs(FAR const struct mfs_sb_s * const sb,
                            FAR struct mfs_ctz_s ctz, const mfs_t idx,
                            FAR char *buf)
{
  mfs_t i;
  mfs_t n_ptrs;
  mfs_t prev_pg;
  mfs_t prev_idx;

  if (idx == 0)
    {
      /* No pointers for first block. */

      return;
    }

  /* Number of back-pointers the CTZ block at `idx` must carry. */

  n_ptrs = ctz_idx_nptrs(idx);

  if (idx != ctz.idx_e + 1)
    {
      /* We travel to the second last "known" CTZ block, so that below
       * (ctz.idx_e, ctz.pg_e) is always the block just before `idx`.
       */

      ctz.pg_e = ctz_travel(sb, ctz.idx_e, ctz.pg_e, idx - 1);
      ctz.idx_e = idx - 1;
    }

  buf += MFS_PGSZ(sb); /* Go to buf + pg_sz */

  DEBUGASSERT(idx == ctz.idx_e + 1);

  finfo("Copying %u pointers for CTZ (%u, %u) at index %u.", n_ptrs,
        ctz.idx_e, ctz.pg_e, idx);

  /* Pointers are packed at the END of the block, filled backwards: the
   * i-th iteration serializes the page number of block (idx - 1 - i),
   * reached by walking one step back through the list each time.
   */

  for (i = 0; i < n_ptrs; i++)
    {
      if (predict_false(i == 0))
        {
          /* First pointer is the immediately preceding block. */

          prev_idx = ctz.idx_e;
          prev_pg = ctz.pg_e;
        }
      else
        {
          /* Step one more block back through the list. */

          prev_pg = ctz_travel(sb, prev_idx, prev_pg, prev_idx - 1);
          prev_idx--;
        }

      /* NOTE(review): this assignment is not read again inside or after
       * the loop (ctz is a by-value copy) -- looks redundant; confirm.
       */

      ctz.idx_e = prev_idx;

      /* Do buf + pg_sz - (idx * sizeof(mfs_t)) iteratively. */

      buf -= MFS_CTZ_PTRSZ;
      mfs_ser_mfs(prev_pg, buf);

      finfo("Copied %u page number to %uth pointer.", prev_pg, i);
    }
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: mfs_ctz_rdfromoff
 *
 * Description:
 *   Read `len` bytes starting at file offset `data_off` from the CTZ list
 *   `ctz` into `buf`.  Since CTZ blocks are singly linked backwards, the
 *   read proceeds from the last involved block towards the first.
 *
 ****************************************************************************/

int mfs_ctz_rdfromoff(FAR const struct mfs_sb_s * const sb,
                      const struct mfs_ctz_s ctz, mfs_t data_off,
                      mfs_t len, FAR char * buf)
{
  int ret = OK;
  mfs_t i;
  mfs_t rd_sz;
  mfs_t cur_pg;
  mfs_t cur_idx;
  mfs_t cur_pgoff;
  mfs_t end_idx;
  mfs_t end_pgoff;
  mfs_t pg_rd_sz;

  /* The END of the range maps to (cur_idx, cur_pgoff) and the START to
   * (end_idx, end_pgoff) -- "cur" is where the backwards walk begins.
   */

  ctz_off2loc(sb, data_off + len, &cur_idx, &cur_pgoff);
  ctz_off2loc(sb, data_off, &end_idx, &end_pgoff);

  if (ctz.idx_e < cur_idx || ctz.idx_e < end_idx)
    {
      /* Requested range lies beyond the end of the list; ret stays OK. */

      goto errout;
    }

  cur_pg = ctz_travel(sb, ctz.idx_e, ctz.pg_e, cur_idx);
  rd_sz = 0; /* NOTE(review): rd_sz is set but never used. */

  if (predict_false(cur_pg == 0))
    {
      goto errout;
    }

  /* O(n) read by reading in reverse. */

  if (cur_idx != end_idx)
    {
      /* NOTE(review): in this branch pages are written at `buf` MINUS the
       * read size, i.e. `buf` is treated as one past the end of the
       * destination, while the single-block branch below writes at `buf`
       * directly -- confirm the caller contract.
       */

      for (i = cur_idx; i >= end_idx; i--)
        {
          if (predict_false(i == cur_idx))
            {
              /* Last (partial) block: bytes [0, cur_pgoff). */

              pg_rd_sz = cur_pgoff;
              ret = mfs_read_page(sb, buf - pg_rd_sz, pg_rd_sz, cur_pg,
                                  0);
              cur_pgoff = 0;
            }
          else if (predict_false(i == end_idx))
            {
              /* First block: bytes [end_pgoff, block data size). */

              pg_rd_sz = ctz_blkdatasz(sb, i) - end_pgoff;
              ret = mfs_read_page(sb, buf - pg_rd_sz, pg_rd_sz, cur_pg,
                                  end_pgoff);
            }
          else
            {
              /* Middle blocks are read in full. */

              pg_rd_sz = ctz_blkdatasz(sb, i);
              ret = mfs_read_page(sb, buf - pg_rd_sz, pg_rd_sz, cur_pg,
                                  0);
            }

          /* mfs_read_page() returns the number of bytes read; 0 means
           * the read failed.
           */

          if (predict_false(ret == 0))
            {
              ret = -EINVAL;
              goto errout;
            }

          buf -= pg_rd_sz;
        }

      /* NOTE(review): this travel is AFTER the loop, so every iteration
       * above reads the same cur_pg; it looks like it was meant to sit
       * inside the loop to step to the previous block -- verify.
       */

      cur_pg = ctz_travel(sb, cur_idx, cur_pg, cur_idx - 1);
      if (predict_false(cur_pg == 0))
        {
          ret = -EINVAL;
          goto errout;
        }
    }
  else
    {
      /* Entire range lies within a single CTZ block. */

      ret = mfs_read_page(sb, buf, len, cur_pg, end_pgoff);
      if (predict_false(ret == 0))
        {
          ret = -EINVAL;
          goto errout;
        }
    }

errout:
  return ret;
}
/****************************************************************************
 * Name: mfs_ctz_wrtnode
 *
 * Description:
 *   Flush an LRU node's deltas to flash by rewriting the modified tail of
 *   its CTZ list (copy-on-write from the first modified block onwards) and
 *   appending a log about the new location to the journal.
 *
 ****************************************************************************/

int mfs_ctz_wrtnode(FAR struct mfs_sb_s * const sb,
                    FAR const struct mfs_node_s * const node)
{
  int ret = OK;
  bool written = false;
  mfs_t prev;
  mfs_t rem_sz;
  mfs_t new_pg;
  mfs_t cur_pg;
  mfs_t cur_idx;
  mfs_t cur_pgoff;
  mfs_t lower;
  mfs_t upper;
  mfs_t upper_og;
  mfs_t lower_upd;
  mfs_t upper_upd;
  mfs_t del_bytes;
  FAR char *buf = NULL;
  FAR char *tmp = NULL;
  struct mfs_ctz_s ctz;
  FAR struct mfs_delta_s *delta;

  /* Traverse common CTZ blocks. */

  ctz_off2loc(sb, node->range_min, &cur_idx, &cur_pgoff);
  ctz = node->path[node->depth - 1].ctz;
  cur_pg = ctz_travel(sb, ctz.idx_e, ctz.pg_e, cur_idx);

  /* So, till cur_idx - 1, the CTZ blocks are common. */

  buf = kmm_zalloc(MFS_PGSZ(sb));
  if (predict_false(buf == NULL))
    {
      ret = -ENOMEM;
      goto errout;
    }

  /* Initially, there might be some offset in cur_idx CTZ blocks that is
   * unmodified as well.
   */

  tmp = buf;
  mfs_read_page(sb, tmp, cur_pgoff, cur_pg, 0);
  tmp += cur_pgoff;

  /* Modifications. */

  prev = 0;
  rem_sz = node->sz;
  lower = node->range_min;
  del_bytes = 0; /* NOTE(review): accumulated but never read -- confirm. */

  /* [lower, upper) range. Two pointer approach. Window gets narrower
   * for every delete falling inside it.
   */

  while (rem_sz > 0)
    {
      upper = MIN(prev + lower + ctz_blkdatasz(sb, cur_idx), rem_sz);
      upper_og = upper;

      list_for_every_entry(&node->delta, delta, struct mfs_delta_s, list)
        {
          if (delta->upd == NULL)
            {
              /* Delete: clip the delta to the current window. */

              lower_upd = MAX(lower, delta->off);
              upper_upd = MIN(upper, delta->off + delta->n_b);

              if (lower_upd >= upper_upd)
                {
                  /* Skip this delta. */

                  continue;
                }
              else
                {
                  del_bytes += upper_upd - lower_upd;

                  /* NOTE(review): offsets used against `tmp` here are
                   * absolute file offsets, and the shrink below drops
                   * `upper_upd` rather than the deleted span length
                   * (upper_upd - lower_upd) -- this matches the overlap
                   * issue this code was later fixed for; verify.
                   */

                  memmove(tmp + lower_upd, tmp + upper_upd,
                          upper - upper_upd);
                  upper -= upper_upd;
                }
            }
          else
            {
              /* Update: refill the window from the old list. */

              ret = mfs_ctz_rdfromoff(sb, ctz, lower, upper - lower,
                                      tmp);
              if (predict_false(ret < 0))
                {
                  goto errout_with_buf;
                }
            }
        }

      /* rem_sz check for final write. */

      if (upper == upper_og || rem_sz == upper - lower)
        {
          prev = 0;

          /* Time to write a page for new CTZ list. */

          new_pg = mfs_ba_getpg(sb);
          if (predict_false(new_pg == 0))
            {
              ret = -ENOSPC;
              goto errout_with_buf;
            }

          /* Prepend the back-pointers for block cur_idx into buf. */

          ctz_copyidxptrs(sb, ctz, cur_idx, buf);

          ret = mfs_write_page(sb, buf, MFS_PGSZ(sb), new_pg, 0);
          if (predict_false(ret == 0))
            {
              ret = -EINVAL;
              goto errout_with_buf;
            }

          memset(buf, 0, MFS_PGSZ(sb));
          tmp = buf;
          cur_idx++;
          written = true;
        }
      else
        {
          tmp += upper - lower;
          written = false;
        }

      prev = upper - lower;
      rem_sz -= upper - lower;
      lower = upper;
    }

  /* new_pg is only initialized when a page was written; the assert below
   * is the only guard in release builds.
   */

  DEBUGASSERT(written);

  /* TODO: Need to verify for cases where the delete extends outside, etc. */

  /* Write log. */

  ctz.idx_e = cur_idx;
  ctz.pg_e = new_pg;

  ret = mfs_jrnl_wrlog(sb, *node, ctz, node->sz);
  if (predict_false(ret < 0))
    {
      goto errout_with_buf;
    }

  /* Flush the journal if it has grown past its block limit. */

  if (MFS_JRNL(sb).log_cblkidx >= MFS_JRNL_LIM(sb))
    {
      ret = mfs_jrnl_flush(sb);
      if (predict_false(ret < 0))
        {
          goto errout_with_buf;
        }
    }

errout_with_buf:
  kmm_free(buf);

errout:
  return ret;
}

View file

@ -85,6 +85,7 @@ static const char *next_child(FAR const char *relpath);
static const char *last_child(FAR const char *relpath);
static FAR char *mfs_ser_dirent(FAR const struct mfs_dirent_s * const x,
FAR char * const out);
static FAR const char *mfs_deser_dirent(FAR const char * const in,
FAR struct mfs_dirent_s * const x);
@ -285,11 +286,87 @@ static FAR const char *mfs_deser_dirent(FAR const char * const in,
return i;
}
/****************************************************************************
 * Name: pitr_traverse
 *
 * Description:
 *   Recursively traverse the directory at path[depth - 1], marking every
 *   page of its CTZ list as in-use in the block allocator and descending
 *   into child directories.  Used only during block-allocator init.
 *
 * Input Parameters:
 *   sb    - Superblock instance of the device.
 *   path  - Path array; entry depth - 1 is the directory to traverse.
 *   depth - 1-based depth of the directory in `path`.
 *   cap   - In/out capacity (in entries) of the `path` array.
 *
 * Returned Value:
 *   0 on success; negated errno on failure.
 *
 ****************************************************************************/

int pitr_traverse(FAR struct mfs_sb_s *sb, FAR struct mfs_path_s *path,
                  mfs_t depth, FAR mfs_t *cap)
{
  int ret = OK;
  mfs_t i;
  mfs_t pg;
  struct mfs_pitr_s pitr;
  struct mfs_ctz_s ctz;
  FAR struct mfs_dirent_s *dirent = NULL;

  /* TODO: Double traversal can be made faster into a single traversal. */

  ctz = path[depth - 1].ctz;
  if (ctz.idx_e == 0 && ctz.pg_e == 0)
    {
      /* Not a valid one. TODO: Does this happens? */

      goto errout;
    }

  /* Walk the CTZ list backwards from its known end page, marking every
   * page as used.  `pg` must start at the end page; previously it was
   * read uninitialized on the first iteration.
   */

  pg = ctz.pg_e;
  for (i = ctz.idx_e; i > 0; i--)
    {
      mfs_ba_markusedpg(sb, pg);
      pg = mfs_ctz_travel(sb, i, pg, i - 1);
      if (pg == 0)
        {
          break;
        }
    }

  if (pg != 0)
    {
      /* Mark the first (index 0) block too; the loop above stops before
       * marking it (and never runs at all when idx_e == 0).
       */

      mfs_ba_markusedpg(sb, pg);
    }

  if (depth == *cap)
    {
      *cap = (*cap * 3) / 2; /* Don't want to double it for memory. */
      path = kmm_realloc(path, (*cap) * sizeof(struct mfs_path_s));
      if (predict_false(path == NULL))
        {
          ret = -ENOMEM;
          goto errout;
        }

      /* NOTE(review): the reallocated pointer is visible only inside this
       * call; a recursive caller still holds the old pointer.  Fixing this
       * needs a `FAR struct mfs_path_s **` interface -- flagged only.
       */
    }

  /* Zero the unused tail entries.  Size is in bytes (the old code omitted
   * the sizeof multiplier), and this runs after the possible realloc so
   * any freshly grown region is covered as well.
   */

  memset(path + depth, 0, (*cap - depth) * sizeof(struct mfs_path_s));

  mfs_pitr_init(sb, path, depth, &pitr, true);

  while (true)
    {
      mfs_pitr_readdirent(sb, path, &pitr, &dirent);
      if (dirent == NULL)
        {
          break;
        }

      if (S_ISDIR(dirent->mode))
        {
          /* Descend into the child directory at depth + 1. */

          path[depth].ctz = dirent->ctz;
          ret = pitr_traverse(sb, path, depth + 1, cap);
          if (predict_false(ret < 0))
            {
              mfs_free_dirent(dirent);
              goto errout;
            }
        }

      mfs_pitr_adv_bydirent(&pitr, dirent);
      mfs_free_dirent(dirent);
    }

errout:
  return ret;
}
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
FAR const char * mfs_path2childname(FAR const char *relpath)
FAR const char *mfs_path2childname(FAR const char *relpath)
{
FAR const char *last = relpath + strlen(relpath) - 1;
@ -317,9 +394,9 @@ mfs_t mfs_get_fsz(FAR struct mfs_sb_s * const sb,
{
sz = MFS_MN(sb).root_sz; /* Updated size. */
/* Journal updated to the root creates a new master node entry. TODO
* this and moving of the journal.
*/
/* Journal updated to the root creates a new master node entry. TODO
* this and moving of the journal.
*/
finfo("File size got as %u for root.", sz);
return sz;
@ -362,11 +439,6 @@ bool mfs_searchfopen(FAR const struct mfs_sb_s * const sb,
continue;
}
/* TODO: Ensure when an LRU's delta is flushed to the journal, the
* new location is updated in the LRU AND the open files, if it is
* open.
*/
if (mfs_path_eq(&ofd->com->path[depth - 1], &path[depth - 1]))
{
return true;
@ -400,7 +472,7 @@ int mfs_pitr_rmdirent(FAR struct mfs_sb_s * const sb,
int mfs_pitr_rm(FAR struct mfs_sb_s * const sb,
FAR struct mfs_path_s * const path,
const mfs_t depth)
const mfs_t depth, bool rm_child)
{
int ret = OK;
struct mfs_pitr_s pitr;
@ -408,7 +480,23 @@ int mfs_pitr_rm(FAR struct mfs_sb_s * const sb,
mfs_pitr_init(sb, path, depth, &pitr, true);
mfs_pitr_readdirent(sb, path, &pitr, &dirent);
ret = mfs_pitr_rmdirent(sb, path, depth, &pitr, dirent);
if (predict_false(ret < 0))
{
goto errout;
}
if (rm_child)
{
ret = mfs_lru_del(sb, 0, path[depth - 1].sz, path, depth);
if (predict_false(ret < 0))
{
goto errout;
}
}
errout:
mfs_free_dirent(dirent);
mfs_pitr_free(&pitr);
@ -558,9 +646,9 @@ int mfs_pitr_readdirent(FAR const struct mfs_sb_s * const sb,
*dirent = d;
DEBUGASSERT(pitr->depth == 0 || strcmp(d->name, ""));
finfo("Read direntry at %u offset, %u depth for CTZ (%u, %u). " \
"Direntry name: \"%s\" with name length %u.",
"Direntry name: \"%.*s\" with name length %u and size %u.",
pitr->c_off, pitr->depth, pitr->p.ctz.idx_e, pitr->p.ctz.pg_e,
d->name, d->namelen);
d->namelen, d->name, d->namelen, d->sz);
return ret;
@ -584,7 +672,7 @@ int mfs_pitr_adv(FAR struct mfs_sb_s * const sb,
FAR struct mfs_path_s *path,
FAR struct mfs_pitr_s * const pitr)
{
int ret = OK;
int ret = OK;
FAR struct mfs_dirent_s *dirent;
ret = mfs_pitr_readdirent(sb, path, pitr, &dirent);
@ -613,11 +701,11 @@ static int search_ctz_by_name(FAR const struct mfs_sb_s * const sb,
/* Applies LRU updates. */
int ret = OK;
uint16_t name_hash;
struct mfs_pitr_s pitr;
int ret = OK;
bool found = false;
uint16_t name_hash;
struct mfs_pitr_s pitr;
FAR struct mfs_dirent_s *nd;
bool found = false;
*dirent = NULL;
@ -864,7 +952,7 @@ int mfs_pitr_appenddirent(FAR struct mfs_sb_s * const sb,
goto errout;
}
/* TOFO: If the parent directory is newly formed (ie. size is 0), then
/* TODO: If the parent directory is newly formed (ie. size is 0), then
* allocate space for it. This can be done better. Just allocate page when
* its created and added first to LRU, and then add a check to ensure it
* doesn't get re-allocated when written. A field like "new" would be
@ -891,12 +979,12 @@ int mfs_pitr_appendnew(FAR struct mfs_sb_s * const sb,
{
/* Depth is depth of the child to be appended. */
int ret = OK;
struct timespec ts;
int ret = OK;
FAR const char *cur = last_child(relpath);
FAR const char *next = next_child(cur);
FAR struct mfs_dirent_s *d = NULL;
const mfs_t len = *next == 0 ? next - cur : next - cur - 1;
const mfs_t len = *next == 0 ? next - cur : next - cur - 1;
struct timespec ts;
FAR struct mfs_dirent_s *d = NULL;
DEBUGASSERT(depth > 0);
@ -942,3 +1030,34 @@ errout_with_d:
errout:
return ret;
}
/* Only for initialization of the block allocator. */

/****************************************************************************
 * Name: mfs_pitr_traversefs
 *
 * Description:
 *   Traverse the entire file system from the root, marking all in-use
 *   pages in the block allocator.
 *
 * Input Parameters:
 *   sb   - Superblock instance of the device.
 *   ctz  - NOTE(review): currently unused -- confirm intent.
 *   type - Takes MFS_ISFILE & MFS_ISDIR.  NOTE(review): currently unused.
 *
 * Returned Value:
 *   0 on success; negated errno on failure.
 *
 ****************************************************************************/

int mfs_pitr_traversefs(FAR struct mfs_sb_s * sb, const struct mfs_ctz_s ctz,
                        int type)
{
  /* type takes in MFS_ISFILE & MFS_ISDIR. */

  int ret = OK;
  mfs_t capacity;
  FAR struct mfs_path_s *path = NULL;

  capacity = MFS_TRAVERSE_INITSZ;
  path = kmm_zalloc(capacity * sizeof(struct mfs_path_s));
  if (predict_false(path == NULL))
    {
      ret = -ENOMEM;
      goto errout;
    }

  /* Seed the traversal with the root directory. */

  path[0].off = 0;
  path[0].ctz = MFS_MN(sb).root_ctz;
  path[0].sz = MFS_MN(sb).root_sz;

  /* Depth is 1-based: pitr_traverse() reads path[depth - 1], so the root
   * (just seeded at path[0]) is depth 1.  The previous depth of 0 made it
   * index path[-1].
   */

  ret = pitr_traverse(sb, path, 1, &capacity);

  mfs_free_patharr(path);

errout:
  return ret;
}

View file

@ -483,13 +483,13 @@ errout:
return ret;
}
int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, mfs_t blk1, mfs_t blk2)
int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, FAR mfs_t *blk1,
FAR mfs_t *blk2, FAR mfs_t *jrnl_blk)
{
int i;
int ret = OK;
mfs_t sz;
mfs_t pg;
mfs_t mn;
mfs_t blk;
mfs_t n_pgs;
mfs_t rem_sz;
@ -509,26 +509,26 @@ int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, mfs_t blk1, mfs_t blk2)
goto errout;
}
if (blk1 == 0 && blk2 == 0)
if (*blk1 == 0 && *blk2 == 0)
{
blk1 = mfs_ba_getblk(sb);
*blk1 = mfs_ba_getblk(sb);
if (predict_false(blk1 == 0))
{
ret = -ENOSPC;
goto errout_with_buf;
}
finfo("Allocated Master Block 1: %d.", blk1);
finfo("Allocated Master Block 1: %d.", *blk1);
blk2 = mfs_ba_getblk(sb);
*blk2 = mfs_ba_getblk(sb);
if (predict_false(blk2 == 0))
{
ret = -ENOSPC;
goto errout_with_buf;
}
finfo("Allocated Master Block 1: %d.", blk2);
finfo("New locations for Master Blocks %d & %d.", blk1, blk2);
finfo("Allocated Master Block 1: %d.", *blk2);
finfo("New locations for Master Blocks %d & %d.", *blk1, *blk2);
}
tmp = buf;
@ -548,8 +548,8 @@ int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, mfs_t blk1, mfs_t blk2)
finfo("Allocated Journal Block %d at Block %d.", i, alloc_blk);
}
tmp = mfs_ser_mfs(blk1, tmp);
tmp = mfs_ser_mfs(blk2, tmp);
tmp = mfs_ser_mfs(*blk1, tmp);
tmp = mfs_ser_mfs(*blk2, tmp);
finfo("All Journal Blocks allocated.");
@ -576,26 +576,15 @@ int mfs_jrnl_fmt(FAR struct mfs_sb_s * const sb, mfs_t blk1, mfs_t blk2)
finfo("Written magic sequence, size and journal array into the journal.");
MFS_JRNL(sb).n_logs = 0;
MFS_JRNL(sb).n_blks = CONFIG_MNEMOFS_JOURNAL_NBLKS;
MFS_JRNL(sb).log_cpg = pg;
MFS_JRNL(sb).log_cblkidx = 0;
MFS_JRNL(sb).log_spg = MFS_JRNL(sb).log_cpg;
MFS_JRNL(sb).log_sblkidx = MFS_JRNL(sb).log_cblkidx;
MFS_JRNL(sb).jrnlarr_pg = MFS_BLK2PG(sb, blk);
MFS_JRNL(sb).jrnlarr_pgoff = MFS_JRNL_SUFFIXSZ;
MFS_JRNL(sb).n_blks = CONFIG_MNEMOFS_JOURNAL_NBLKS;
MFS_JRNL(sb).mblk1 = blk1;
MFS_JRNL(sb).mblk2 = blk2;
/* Master node */
mn = mfs_mn_fmt(sb, blk);
if (predict_false(mn == 0))
{
ret = -ENOSPC;
goto errout_with_buf;
}
/* TODO: Write master node's location in blk1, blk2. */
MFS_JRNL(sb).mblk1 = *blk1;
MFS_JRNL(sb).mblk2 = *blk2;
errout_with_buf:
kmm_free(buf);
@ -659,7 +648,7 @@ int mfs_jrnl_updatedinfo(FAR const struct mfs_sb_s * const sb,
mfs_t blkidx;
mfs_t counter = 0;
mfs_t pg_in_block;
struct jrnl_log_s tmplog;
struct jrnl_log_s tmplog;
/* TODO: Allow optional filling of updated timestamps, etc. */
@ -707,19 +696,19 @@ errout:
}
int mfs_jrnl_wrlog(FAR struct mfs_sb_s * const sb,
const struct mfs_node_s node,
FAR const struct mfs_node_s *node,
const struct mfs_ctz_s loc_new, const mfs_t sz_new)
{
int ret = OK;
mfs_t i;
mfs_t n_pgs;
mfs_t wr_sz;
mfs_t jrnl_pg;
mfs_t jrnl_blk;
FAR char *buf = NULL;
FAR char *tmp = NULL;
const mfs_t log_sz = sizeof(mfs_t) + MFS_LOGSZ(node.depth);
struct jrnl_log_s log;
int ret = OK;
mfs_t i;
mfs_t n_pgs;
mfs_t wr_sz;
mfs_t jrnl_pg;
mfs_t jrnl_blk;
FAR char *buf = NULL;
FAR char *tmp = NULL;
const mfs_t log_sz = sizeof(mfs_t) + MFS_LOGSZ(node->depth);
struct jrnl_log_s log;
buf = kmm_zalloc(log_sz); /* For size before log. */
if (predict_false(buf == NULL))
@ -730,16 +719,16 @@ int mfs_jrnl_wrlog(FAR struct mfs_sb_s * const sb,
/* Serialize */
log.depth = node.depth;
log.sz_new = node.depth;
log.depth = node->depth;
log.sz_new = sz_new;
log.loc_new = loc_new;
log.st_mtim_new = node.st_mtim;
log.st_atim_new = node.st_atim;
log.st_ctim_new = node.st_ctim;
log.path = node.path; /* Fine as temporarily usage. */
log.st_mtim_new = node->st_mtim;
log.st_atim_new = node->st_atim;
log.st_ctim_new = node->st_ctim;
log.path = node->path; /* Fine as temporarily usage. */
tmp = buf;
tmp = mfs_ser_mfs(log_sz - sizeof(mfs_t), tmp);
tmp = mfs_ser_mfs(log_sz - sizeof(mfs_t), tmp); /* First 4 bytes have sz */
tmp = ser_log(&log, tmp);
/* Store */
@ -820,7 +809,200 @@ errout:
int mfs_jrnl_flush(FAR struct mfs_sb_s * const sb)
{
/* TODO */
/* When a file or a directory is deleted.
*
* It will be modified to an entry in the LRU which details the deletion
* of all bytes from the child... as in, offset 0, deleted bytes is the
* size of the file.
*
* The new "location" can be used as (0, 0) to signify a deletion, even in
* its journal log.
*
* Also ensure if the size gets updated to 0.
*
* Then the flush operation problem will be solved for removal of files or
* directories.
*
* Move operation will not empty the child, but only the parent from the
* old parent.
*/
return OK;
/* Time complexity is going to be horrendous. Hint: O(n^2). HOWEVER, as
* littlefs points out....if n is constant, it's essentially a O(k), or
* O(1) :D
*/
/* TODO: Need to consider how the LRU and Journal interact with each other
* for newly created fs object's entries.
*/
/* We're using updatectz to update the LRU inside the journal. Think
* about how that might affect the iteration attempts.
*/
int ret = OK;
mfs_t blkidx = MFS_JRNL(sb).log_sblkidx;
mfs_t log_itr = 0;
mfs_t pg_in_blk = MFS_JRNL(sb).log_spg \
% MFS_PGINBLK(sb);
mfs_t tmp_blkidx;
mfs_t tmp_pg_in_blk;
mfs_t mn_blk1;
mfs_t mn_blk2;
mfs_t i;
mfs_t jrnl_blk;
mfs_t blk;
struct jrnl_log_s log;
struct jrnl_log_s tmp_log;
FAR struct mfs_path_s *path = NULL;
struct mfs_jrnl_state_s j_state;
struct mfs_mn_s mn_state;
while (log_itr < MFS_JRNL(sb).n_logs)
{
ret = jrnl_rdlog(sb, &blkidx, &pg_in_blk, &log);
if (predict_false(ret < 0))
{
DEBUGASSERT(ret != -ENOSPC); /* While condition is sufficient. */
goto errout;
}
if (log.loc_new.pg_e == 0 && log.loc_new.idx_e == 0)
{
/* Entry is deleted, do not bother with it. */
break;
}
tmp_blkidx = blkidx;
tmp_pg_in_blk = pg_in_blk;
path = kmm_zalloc(log.depth * sizeof(struct mfs_path_s));
if (predict_false(path == NULL))
{
goto errout;
}
memcpy(path, log.path, log.depth * sizeof(struct mfs_path_s));
path[log.depth - 1].ctz = log.loc_new;
for (; ; )
{
ret = jrnl_rdlog(sb, &tmp_blkidx, &tmp_pg_in_blk, &tmp_log);
if (ret == -ENOSPC)
{
break;
}
else if (predict_false(ret < 0))
{
jrnl_log_free(&log);
goto errout;
}
if (tmp_log.depth > log.depth)
{
jrnl_log_free(&tmp_log);
continue;
}
if (!mfs_path_eq(&path[tmp_log.depth - 1],
&tmp_log.path[tmp_log.depth - 1]))
{
jrnl_log_free(&tmp_log);
continue;
}
path[tmp_log.depth - 1] = tmp_log.path[tmp_log.depth - 1];
if (tmp_log.loc_new.pg_e == 0 && tmp_log.loc_new.idx_e == 0)
{
/* Entry is deleted, do not bother with it. */
break;
}
}
if (log.depth == 1)
{
MFS_MN(sb).root_ctz = path[log.depth - 1].ctz;
MFS_MN(sb).root_sz = path[log.depth - 1].sz;
/* TODO: Other parameters. */
}
else
{
ret = mfs_lru_updatectz(sb, path, log.depth,
path[log.depth - 1].ctz,
path[log.depth - 1].sz);
if (predict_false(ret < 0))
{
mfs_free_patharr(path);
jrnl_log_free(&log);
goto errout;
}
}
mfs_free_patharr(path);
jrnl_log_free(&log);
}
if (MFS_MN(sb).mblk_idx == MFS_PGINBLK(sb))
{
mn_blk1 = 0;
mn_blk2 = 0;
}
else
{
/* FUTURE TODO: Save the two block numbers in master node structure to
* be faster.
*/
mn_blk1 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks);
mn_blk2 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks + 1);
}
/* Reallocate journal. */
j_state = MFS_JRNL(sb);
mn_state = MFS_MN(sb);
ret = mfs_jrnl_fmt(sb, &mn_blk1, &mn_blk2, &jrnl_blk);
if (predict_false(ret < 0))
{
MFS_JRNL(sb) = j_state;
goto errout;
}
/* Write master node entry. */
ret = mfs_mn_sync(sb, &path[0], mn_blk1, mn_blk2, jrnl_blk);
if (predict_false(ret < 0))
{
MFS_MN(sb) = mn_state;
goto errout;
}
/* Mark all old blocks of journal (and master blocks) as deletable. */
for (i = 0; i < MFS_JRNL(sb).n_blks + 2; i++)
{
blk = mfs_jrnl_blkidx2blk(sb, i);
mfs_ba_blkmarkdel(sb, blk);
}
/* Delete outdated blocks. */
ret = mfs_ba_delmarked(sb);
if (predict_false(ret < 0))
{
goto errout;
}
errout:
return ret;
}
bool mfs_jrnl_isempty(FAR const struct mfs_sb_s * const sb)
{
  /* The journal is empty exactly when it holds no logs. */

  return MFS_JRNL(sb).n_logs == 0;
}

View file

@ -97,11 +97,12 @@ static void lru_nodesearch(FAR const struct mfs_sb_s * const sb,
FAR const struct mfs_path_s * const path,
const mfs_t depth, FAR struct mfs_node_s **node);
static bool lru_islrufull(FAR struct mfs_sb_s * const sb);
static bool lru_isnodefull(FAR struct mfs_node_s *node);
static bool lru_isnodefull(FAR struct mfs_sb_s * const sb,
FAR struct mfs_node_s *node);
static int lru_nodeflush(FAR struct mfs_sb_s * const sb,
FAR struct mfs_path_s * const path,
const mfs_t depth, FAR struct mfs_node_s *node,
bool clean_node);
bool rm_node);
static int lru_wrtooff(FAR struct mfs_sb_s * const sb, const mfs_t data_off,
mfs_t bytes, int op,
FAR struct mfs_path_s * const path,
@ -109,6 +110,7 @@ static int lru_wrtooff(FAR struct mfs_sb_s * const sb, const mfs_t data_off,
static int lru_updatesz(FAR struct mfs_sb_s * sb,
FAR struct mfs_path_s * const path,
const mfs_t depth, const mfs_t new_sz);
static void lru_node_free(FAR struct mfs_node_s *node);
/****************************************************************************
* Private Data
@ -214,11 +216,15 @@ static void lru_nodesearch(FAR const struct mfs_sb_s * const sb,
* true - LRU is full
* false - LRU is not full.
*
* Assumptions/Limitations:
* When the journal is being flushed, LRU memory limiters will be turned
* off.
*
****************************************************************************/
static bool lru_islrufull(FAR struct mfs_sb_s * const sb)
{
return sb->n_lru == CONFIG_MNEMOFS_NLRU;
return !MFS_FLUSH(sb) && list_length(&MFS_LRU(sb)) == CONFIG_MNEMOFS_NLRU;
}
/****************************************************************************
@ -234,11 +240,16 @@ static bool lru_islrufull(FAR struct mfs_sb_s * const sb)
* true - LRU node is full
* false - LRU node is not full.
*
* Assumptions/Limitations:
* When the journal is being flushed, LRU memory limiters will be turned
* off.
*
****************************************************************************/
static bool lru_isnodefull(FAR struct mfs_node_s *node)
static bool lru_isnodefull(FAR struct mfs_sb_s * const sb,
FAR struct mfs_node_s *node)
{
return node->n_list == CONFIG_MNEMOFS_NLRUDELTA;
return !MFS_FLUSH(sb) && node->n_list == CONFIG_MNEMOFS_NLRUDELTA;
}
/****************************************************************************
@ -263,15 +274,16 @@ static void lru_free_delta(FAR struct mfs_delta_s *delta)
*
* Description:
* Clear out the deltas in a node by writing them to the flash, and adding
* a log about it to the journal.
* a log about it to the journal. Does not flush the journal, and assumes
* enough space is in the journal to handle a log.
*
* Input Parameters:
* sb - Superblock instance of the device.
* path - CTZ representation of the relpath.
* depth - Depth of `path`.
* node - LRU node to flush.
* clean_node - To remove node out of LRU (true), or just clear the
* deltas (false).
* sb - Superblock instance of the device.
* path - CTZ representation of the relpath.
* depth - Depth of `path`.
* node - LRU node to flush.
* rm_node - To remove node out of LRU (true), or just clear the deltas
* (false).
*
* Returned Value:
* 0 - OK
@ -282,39 +294,59 @@ static void lru_free_delta(FAR struct mfs_delta_s *delta)
static int lru_nodeflush(FAR struct mfs_sb_s * const sb,
FAR struct mfs_path_s * const path,
const mfs_t depth, FAR struct mfs_node_s *node,
bool clean_node)
bool rm_node)
{
int ret = OK;
struct mfs_ctz_s loc;
FAR struct mfs_delta_s *delta = NULL;
FAR struct mfs_delta_s *tmp = NULL;
if (predict_false(node == NULL))
{
return -ENOMEM;
return -EINVAL;
}
/* TODO: Implement effct of clean_node. */
ret = mfs_ctz_wrtnode(sb, node);
ret = mfs_ctz_wrtnode(sb, node, &loc);
if (predict_false(ret < 0))
{
goto errout;
}
/* Reset node stats. */
node->range_max = 0;
node->range_min = UINT32_MAX;
/* Free deltas after flush. */
list_for_every_entry_safe(&node->list, delta, tmp, struct mfs_delta_s,
finfo("Removing Deltas.");
list_for_every_entry_safe(&node->delta, delta, tmp, struct mfs_delta_s,
list)
{
list_delete_init(&delta->list);
lru_free_delta(delta);
}
if (rm_node)
{
finfo("Deleting node. Old size: %u.", list_length(&MFS_LRU(sb)));
list_delete_init(&node->list);
finfo("Deleted node. New size: %u.", list_length(&MFS_LRU(sb)));
}
else
{
/* Reset node stats. */
finfo("Resetting node.");
memset(node, 0, sizeof(struct mfs_node_s));
node->range_min = UINT32_MAX;
}
finfo("Updating CTZ in parent.");
ret = mfs_lru_updatectz(sb, node->path, node->depth, loc, node->sz);
if (rm_node)
{
finfo("Freeing node.");
lru_node_free(node);
}
errout:
return ret;
}
@ -353,6 +385,8 @@ static int lru_wrtooff(FAR struct mfs_sb_s * const sb, const mfs_t data_off,
FAR struct mfs_node_s *last_node = NULL;
FAR struct mfs_delta_s *delta = NULL;
DEBUGASSERT(depth > 0);
lru_nodesearch(sb, path, depth, &node);
if (node == NULL)
@ -394,20 +428,24 @@ static int lru_wrtooff(FAR struct mfs_sb_s * const sb, const mfs_t data_off,
struct mfs_node_s, list);
list_delete_init(&last_node->list);
list_add_tail(&MFS_LRU(sb), &node->list);
finfo("LRU flushing node complete, now only %u nodes", sb->n_lru);
finfo("LRU flushing node complete, now only %u nodes",
list_length(&MFS_LRU(sb)));
}
else
{
list_add_tail(&MFS_LRU(sb), &node->list);
sb->n_lru++;
finfo("Node inserted into LRU, and it now %u node(s).", sb->n_lru);
finfo("Node inserted into LRU, and it now %u node(s).",
list_length(&MFS_LRU(sb)));
}
}
else if (found && lru_isnodefull(node))
else if (found && lru_isnodefull(sb, node))
{
/* Node flush writes to the flash and journal. */
/* This can be optimized further if needed, but for now, for saftey of
* the data, I think it's better to flush the entire thing. It won't
* flush ALL of it, just, whatever's required.
*/
ret = lru_nodeflush(sb, path, depth, node, false);
ret = mnemofs_flush(sb);
if (predict_false(ret < 0))
{
goto errout_with_node;
@ -563,12 +601,13 @@ static int lru_updatesz(FAR struct mfs_sb_s * sb,
mfs_ser_mfs(new_sz, buf);
/* This function will be used by mfs_lru_wr itself, but given that if
* there is no change in size, this won't cause an infinite loop, this
* should be fine.
* there is no change in size, this won't cause an infinite loop (or, in
* reality, a recursion till it reaches the top of the tree), this should
* be fine.
*/
ret = mfs_lru_wr(sb, path[depth - 2].off + offsetof(struct mfs_dirent_s,
sz), sizeof(mfs_t), path, depth, buf);
sz), sizeof(mfs_t), path, depth - 1, buf);
if (predict_false(ret < 0))
{
goto errout;
@ -580,22 +619,220 @@ errout:
return ret;
}
static void lru_node_free(FAR struct mfs_node_s *node)
{
  /* Free the node's owned path array before the node itself; the order
   * matters since node->path is unreachable after kmm_free(node).
   */

  mfs_free_patharr(node->path);
  kmm_free(node);
}
static bool lru_sort_cmp(FAR struct mfs_node_s * const node,
                         FAR struct mfs_node_s * const pivot)
{
  /* Ordering predicate for the LRU depth sort: true when `node` is
   * strictly shallower than `pivot`.
   */

  return (pivot->depth > node->depth);
}
/* Quicksort over the intrusive LRU list, window [left, right], with the
 * right node as pivot.  Nodes comparing less than the pivot (shallower)
 * are relinked AFTER it, yielding decreasing-depth order overall.
 */

static void lru_sort(FAR struct mfs_sb_s * const sb,
                     FAR struct list_node *left,
                     FAR struct list_node *right)
{
  FAR struct mfs_node_s *node = NULL;
  FAR struct mfs_node_s *next = NULL;
  FAR struct mfs_node_s *pivot = NULL;
  FAR struct list_node *aend = NULL; /* After end. */
  FAR struct list_node *bfirst = NULL; /* Before first. */

  if (left == right)
    {
      /* Single-node window is already sorted. */

      return;
    }

  /* If left or right is NULL, it means that refers to MFS_LRU(sb). */

  /* Anchor the window from the outside so it can still be found after
   * nodes inside it are relinked.
   */

  aend = right->next;
  bfirst = left->prev;

  node = list_container_of(left, struct mfs_node_s, list);
  pivot = list_container_of(right, struct mfs_node_s, list);

  if (node->list.next == &pivot->list)
    {
      /* Only two items in the window...node and pivot, so insertion sort. */

      if (lru_sort_cmp(node, pivot))
        {
          /* Add node after the pivot. */

          list_delete_init(&node->list);
          list_add_after(&pivot->list, &node->list);
          DEBUGASSERT(pivot->list.prev == bfirst);
        }

      /* NOTE(review): depths are unchanged by relinking, so if the branch
       * above ran, this assertion compares the same values and would
       * still be true-to-fail -- it looks like it should assert on the
       * resulting ordering instead; verify.
       */

      DEBUGASSERT(!lru_sort_cmp(node, pivot));
      return;
    }

  /* Partition: move every node shallower than the pivot to just after
   * the pivot.  The _safe_from variant tolerates relinking `node`.
   */

  list_for_every_entry_safe_from(&MFS_LRU(sb), node, next, struct mfs_node_s,
                                 list)
    {
      if (node == pivot)
        {
          break;
        }

      if (lru_sort_cmp(node, pivot))
        {
          /* Add node after the pivot. */

          list_delete_init(&node->list);
          list_add_after(&pivot->list, &node->list);
        }
    }

  /* Recurse on the sub-windows before and after the pivot, re-derived
   * from the outside anchors since the window contents moved.
   */

  if (bfirst->next != &pivot->list)
    {
      lru_sort(sb, bfirst->next, pivot->list.prev);
    }

  if (aend->prev != &pivot->list)
    {
      lru_sort(sb, pivot->list.next, aend->prev);
    }
}
/****************************************************************************
* Public Functions
****************************************************************************/
int mfs_lru_ctzflush(FAR struct mfs_sb_s * const sb,
FAR struct mfs_path_s * const path, const mfs_t depth)
int mfs_lru_flush(FAR struct mfs_sb_s * const sb)
{
struct mfs_node_s *node = NULL;
int ret = OK;
FAR struct mfs_node_s *tmp = NULL;
FAR struct mfs_node_s *tmp2 = NULL;
FAR struct mfs_node_s *node = NULL;
FAR struct mfs_node_s *next = NULL;
lru_nodesearch(sb, path, depth, &node);
if (node == NULL)
/* Modified quick sort in linked lists. What is wanted is like inverted
* topological sort, but all the files (no children) are at the front,
* their depths don't matter. BUT, when it comes to directories, they need
* to be sorted in a decreasing order of their depths to reduce updates due
* to CoW. This will trickle up to the root, such that the root will be the
* last to get updated, and then the master node.
*
* However, since passing the mode all the way requires a lot of change, and
* is a redundant piece of information in most cases, the quick sort can
* simply be done on the basis of depth, and this adventure can be left as a
* TOOD.
*
* This involves recursion, but given the LRU size is a constant, the depth
* of recursion will be log2(n). For an LRU size of even 128 (which is quite
* big), the stack depth for this will be 7.
*/
finfo("Sorting the LRU. No. of nodes: %u.", list_length(&MFS_LRU(sb)));
lru_sort(sb, MFS_LRU(sb).next, MFS_LRU(sb).prev);
MFS_FLUSH(sb) = true;
list_for_every_entry_safe(&MFS_LRU(sb), node, next, struct mfs_node_s,
list)
{
return OK;
finfo("Current node depth: %u.", node->depth);
if (node->depth != 1)
{
finfo("Checking for parent.");
/* Ensuring parent is either present, or inserted into the LRU.
* No need of doing this before removing current node from LRU,
* however, this allows us to possibly skip allocating path again
* after freeing the current node.
*/
/* We can not rely on normal LRU node insertions, as they will
* not be inserted in a sorted manner, and would need the entire
* LRU to be sorted again, so we insert it manually.
*/
lru_nodesearch(sb, node->path, node->depth - 1, &tmp);
if (tmp == NULL)
{
finfo("Adding parent to LRU");
tmp = kmm_zalloc(sizeof(struct mfs_node_s));
if (predict_false(tmp == NULL))
{
ret = -ENOMEM;
goto errout;
}
tmp->range_max = 0;
tmp->range_min = UINT32_MAX;
/* TODO: Time fields. in tmp. */
tmp->depth = node->depth - 1;
tmp->path = kmm_zalloc((node->depth - 1)
* sizeof(struct mfs_path_s));
if (predict_false(tmp->path == NULL))
{
ret = -ENOMEM;
goto errout_with_tmp;
}
memcpy(tmp->path, node->path,
sizeof(struct mfs_path_s) * tmp->depth);
list_initialize(&tmp->list);
list_initialize(&tmp->delta);
/* Insert into sorted. */
list_for_every_entry(&MFS_LRU(sb), tmp2, struct mfs_node_s,
list)
{
if (!lru_sort_cmp(tmp, tmp2))
{
list_add_before(&tmp2->list, &tmp->list);
if (tmp2->list.prev == &node->list)
{
next = tmp2;
}
break;
}
}
}
else
{
finfo("Parent already in LRU.");
}
}
else
{
finfo("Root node from LRU.");
}
/* Parent gets updated inside the LRU in the function below. */
finfo("Flushing node.");
ret = lru_nodeflush(sb, node->path, node->depth, node, true);
if (predict_true(ret < 0))
{
goto errout;
}
}
return lru_nodeflush(sb, path, depth, node, true);
return ret;
errout_with_tmp:
lru_node_free(node);
errout:
MFS_FLUSH(sb) = false;
return ret;
}
int mfs_lru_del(FAR struct mfs_sb_s * const sb, const mfs_t data_off,
@ -615,7 +852,6 @@ int mfs_lru_wr(FAR struct mfs_sb_s * const sb, const mfs_t data_off,
void mfs_lru_init(FAR struct mfs_sb_s * const sb)
{
list_initialize(&MFS_LRU(sb));
sb->n_lru = 0;
finfo("LRU Initialized\n");
}
@ -639,23 +875,24 @@ int mfs_lru_rdfromoff(FAR const struct mfs_sb_s * const sb,
FAR struct mfs_node_s *node = NULL;
FAR struct mfs_delta_s *delta = NULL;
lru_nodesearch(sb, path, depth, &node);
if (node == NULL)
{
goto errout;
}
/* Node is NOT supposed to be freed by the caller, it's a reference to
* the actual node in the LRU and freeing it could break the entire LRU.
*/
tmp = buf;
ctz = node->path[node->depth - 1].ctz;
ctz = path[depth - 1].ctz;
lower = data_off;
upper_og = lower + buflen;
upper = upper_og;
rem_sz = buflen;
lru_nodesearch(sb, path, depth, &node);
if (node == NULL)
{
mfs_ctz_rdfromoff(sb, ctz, 0, buflen, tmp);
goto errout;
}
while (rem_sz > 0)
{
mfs_ctz_rdfromoff(sb, ctz, lower, rem_sz, tmp);
@ -773,34 +1010,57 @@ int mfs_lru_updatedinfo(FAR const struct mfs_sb_s * const sb,
int mfs_lru_updatectz(FAR struct mfs_sb_s * sb,
FAR struct mfs_path_s * const path, const mfs_t depth,
const struct mfs_ctz_s new_ctz)
const struct mfs_ctz_s new_ctz, mfs_t new_sz)
{
int ret = OK;
char buf[sizeof(struct mfs_ctz_s)];
FAR struct mfs_node_s *node = NULL;
/* TODO: Other attributes like time stamps to be updated as well. */
list_for_every_entry(&MFS_LRU(sb), node, struct mfs_node_s, list)
{
if (node->depth >= depth &&
mfs_ctz_eq(&node->path[depth - 1].ctz, &path[depth - 1].ctz) &&
node->path[depth - 1].sz == path[depth - 1].sz)
mfs_ctz_eq(&node->path[depth - 1].ctz, &path[depth - 1].ctz))
{
node->path[depth - 1].ctz = new_ctz;
node->path[depth - 1].sz = path[depth - 1].sz;
}
}
if (depth == 1)
{
MFS_MN(sb).root_sz = new_sz;
MFS_MN(sb).root_ctz = new_ctz;
goto errout;
}
/* Write to LRU. */
memset(buf, 0, sizeof(struct mfs_ctz_s));
mfs_ser_ctz(&new_ctz, buf);
ret = mfs_lru_wr(sb, path[depth - 1].off + offsetof(struct mfs_dirent_s,
ctz), sizeof(struct mfs_ctz_s), path, depth, buf);
ctz), sizeof(struct mfs_ctz_s), path, depth - 1, buf);
if (predict_false(ret < 0))
{
goto errout;
}
ret = lru_updatesz(sb, path, depth, new_sz);
if (predict_false(ret < 0))
{
goto errout;
}
path[depth - 1].ctz = new_ctz;
path[depth - 1].sz = new_sz;
errout:
return ret;
}
/****************************************************************************
 * Name: mfs_lru_isempty
 *
 * Description:
 *   Check whether the LRU list of the superblock currently holds no nodes.
 *
 * Input Parameters:
 *   sb - Superblock instance of the device.
 *
 * Returned Value:
 *   true if the LRU list is empty, false otherwise.
 *
 ****************************************************************************/

bool mfs_lru_isempty(FAR struct mfs_sb_s * const sb)
{
  /* An empty doubly-linked list has a length of exactly 0. */

  return list_length(&MFS_LRU(sb)) == 0;
}

View file

@ -100,7 +100,7 @@
static FAR char *ser_mn(const struct mfs_mn_s mn,
FAR char * const out);
static FAR const char *deser_mn(FAR const char * const in,
FAR struct mfs_mn_s *mn, FAR uint8_t *hash);
FAR struct mfs_mn_s *mn, FAR uint16_t *hash);
/****************************************************************************
* Private Data
@ -141,7 +141,9 @@ static FAR char *ser_mn(const struct mfs_mn_s mn, FAR char * const out)
tmp = mfs_ser_ctz(&mn.root_ctz, tmp);
tmp = mfs_ser_mfs(mn.root_sz, tmp);
tmp = mfs_ser_timespec(&mn.ts, tmp);
tmp = mfs_ser_8(mfs_arrhash(out, tmp - out), tmp);
tmp = mfs_ser_16(mfs_hash(out, tmp - out), tmp);
/* TODO: Update this, and the make a macro for size of MN. */
return tmp;
}
@ -166,7 +168,7 @@ static FAR char *ser_mn(const struct mfs_mn_s mn, FAR char * const out)
****************************************************************************/
static FAR const char *deser_mn(FAR const char * const in,
FAR struct mfs_mn_s *mn, FAR uint8_t *hash)
FAR struct mfs_mn_s *mn, FAR uint16_t *hash)
{
FAR const char *tmp = in;
@ -175,7 +177,9 @@ static FAR const char *deser_mn(FAR const char * const in,
tmp = mfs_deser_ctz(tmp, &mn->root_ctz);
tmp = mfs_deser_mfs(tmp, &mn->root_sz);
tmp = mfs_deser_timespec(tmp, &mn->ts);
tmp = mfs_deser_8(tmp, hash);
tmp = mfs_deser_16(tmp, hash);
/* TODO: Update this, and the make a macro for size of MN. */
return tmp;
}
@ -192,15 +196,14 @@ int mfs_mn_init(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk)
mfs_t mblk2;
mfs_t jrnl_blk_tmp;
bool found = false;
uint8_t hash;
uint16_t hash;
struct mfs_mn_s mn;
const mfs_t sz = sizeof(struct mfs_mn_s) - sizeof(mn.pg);
char buftmp[4];
char buf[sz + 1];
mblk1 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks);
mblk2 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks + 1);
mblk1 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks);
mblk2 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks + 1);
mn.jrnl_blk = mn.jrnl_blk;
mn.mblk_idx = 0;
mn.pg = MFS_BLK2PG(sb, mblk1);
@ -238,7 +241,6 @@ int mfs_mn_init(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk)
}
else
{
mn.mblk_idx--;
mn.pg--;
}
@ -247,7 +249,7 @@ int mfs_mn_init(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk)
/* Deserialize. */
deser_mn(buf, &mn, &hash);
if (hash != mfs_arrhash(buf, sz))
if (hash != mfs_hash(buf, sz))
{
ret = -EINVAL;
goto errout;
@ -263,12 +265,11 @@ errout:
return ret;
}
int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk)
int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t mblk1,
const mfs_t mblk2, const mfs_t jrnl_blk)
{
int ret = OK;
mfs_t pg;
mfs_t mblk1;
mfs_t mblk2;
struct mfs_mn_s mn;
struct timespec ts;
const mfs_t sz = sizeof(struct mfs_mn_s) - sizeof(mn.pg);
@ -278,9 +279,6 @@ int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk)
memset(buf, 0, sz + 1);
mblk1 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks);
mblk2 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks + 1);
pg = mfs_ba_getpg(sb);
if (predict_false(pg == 0))
{
@ -318,6 +316,7 @@ int mfs_mn_fmt(FAR struct mfs_sb_s * const sb, const mfs_t jrnl_blk)
goto errout;
}
mn.mblk_idx = 1;
MFS_MN(sb) = mn;
finfo("Master node written. Now at page %d, timestamp %lld.%.9ld.",
MFS_MN(sb).pg, (long long)MFS_MN(sb).ts.tv_sec,
@ -346,9 +345,9 @@ int mfs_mn_move(FAR struct mfs_sb_s * const sb, struct mfs_ctz_s root,
mblk2 = mfs_jrnl_blkidx2blk(sb, MFS_JRNL(sb).n_blks + 1);
mn = MFS_MN(sb);
mn.root_ctz = root;
mn.root_ctz = root;
mn.root_sz = root_sz;
mn.mblk_idx++;
mn.mblk_idx++; /* TODO */
mn.pg++;
ser_mn(mn, buf);
@ -364,3 +363,55 @@ int mfs_mn_move(FAR struct mfs_sb_s * const sb, struct mfs_ctz_s root,
errout:
return ret;
}
/****************************************************************************
 * Name: mfs_mn_sync
 *
 * Description:
 *   Write an updated copy of the master node to flash.  The master node is
 *   serialized and written redundantly to the same page offset inside the
 *   two master blocks (blk1 and blk2).  If the current master block is
 *   full (mblk_idx has reached the pages-per-block count), the write
 *   position is reset to the start of the freshly allocated blocks.
 *
 *   Updates the in-memory copy of the master node (MFS_MN(sb)) only after
 *   both writes succeed.
 *
 * Input Parameters:
 *   sb       - Superblock instance of the device.
 *   new_loc  - New location (CTZ and size) of the root.
 *   blk1     - First master block.
 *   blk2     - Second (redundant) master block.
 *   jrnl_blk - NOTE(review): currently unused in this function — confirm
 *              whether it is needed or can be dropped from the interface.
 *
 * Returned Value:
 *   0 (OK) on success; a negated errno value from mfs_write_page on
 *   failure.
 *
 ****************************************************************************/

int mfs_mn_sync(FAR struct mfs_sb_s *sb,
                FAR struct mfs_path_s * const new_loc,
                const mfs_t blk1, const mfs_t blk2, const mfs_t jrnl_blk)
{
  int ret = OK;
  struct timespec ts;
  struct mfs_mn_s mn;

  /* Serialized size excludes the in-memory-only page field (mn.pg). */

  const mfs_t sz = sizeof(struct mfs_mn_s) - sizeof(mn.pg);
  char buf[sz + 1];

  mn = MFS_MN(sb);
  clock_gettime(CLOCK_REALTIME, &ts);

  if (mn.mblk_idx == MFS_PGINBLK(sb))
    {
      /* New blocks have been already allocated by the journal. */

      mn.mblk_idx = 0;
      mn.pg = MFS_BLK2PG(sb, blk1);
    }

  mn.ts = ts;
  mn.root_sz = new_loc->sz;
  mn.root_ctz = new_loc->ctz;
  mn.root_mode = 0777 | S_IFDIR;

  /* TODO: Root timestamps. */

  /* Serialize. */

  ser_mn(mn, buf);

  /* Write the same serialized master node at the same page offset in both
   * master blocks for redundancy.
   */

  ret = mfs_write_page(sb, buf, sz, MFS_BLK2PG(sb, blk1) + mn.mblk_idx, 0);
  if (predict_false(ret < 0))
    {
      goto errout;
    }

  ret = mfs_write_page(sb, buf, sz, MFS_BLK2PG(sb, blk2) + mn.mblk_idx, 0);
  if (predict_false(ret < 0))
    {
      goto errout;
    }

  /* NOTE(review): mblk_idx advances to the next page slot, but mn.pg is
   * only refreshed on block rollover above — confirm mn.pg is not expected
   * to track the write position between syncs.
   */

  mn.mblk_idx++;

  MFS_MN(sb) = mn;

errout:
  return ret;
}

View file

@ -53,6 +53,8 @@
* Included Files
****************************************************************************/
#include <sys/param.h>
#include "mnemofs.h"
/****************************************************************************
@ -118,10 +120,10 @@ ssize_t mfs_write_page(FAR const struct mfs_sb_s * const sb,
return -EINVAL;
}
mempcpy(MFS_RWBUF(sb) + pgoff, data, datalen);
memcpy(MFS_RWBUF(sb) + pgoff, data, MIN(datalen, MFS_PGSZ(sb) - pgoff));
ret = MTD_BWRITE(MFS_MTD(sb), page, 1, MFS_RWBUF(sb));
if (ret < 0)
if (predict_false(ret < 0))
{
goto errout_with_reset;
}
@ -144,12 +146,12 @@ ssize_t mfs_read_page(FAR const struct mfs_sb_s * const sb,
}
ret = MTD_BREAD(MFS_MTD(sb), page, 1, MFS_RWBUF(sb));
if (ret < 0)
if (predict_false(ret < 0))
{
goto errout_with_reset;
}
memcpy(data, MFS_RWBUF(sb) + pgoff, datalen);
memcpy(data, MFS_RWBUF(sb) + pgoff, MIN(datalen, MFS_PGSZ(sb) - pgoff));
errout_with_reset:
memset(MFS_RWBUF(sb), 0, MFS_PGSZ(sb));

View file

@ -88,8 +88,8 @@
uint8_t mfs_arrhash(FAR const char *arr, ssize_t len)
{
ssize_t l = 0;
ssize_t r = len - 1;
ssize_t l = 0;
ssize_t r = len - 1;
uint16_t hash = 0;
/* TODO: Change the array checksum to be 16 bit long. */
@ -109,8 +109,8 @@ uint8_t mfs_arrhash(FAR const char *arr, ssize_t len)
uint16_t mfs_hash(FAR const char *arr, ssize_t len)
{
ssize_t l = 0;
ssize_t r = len - 1;
ssize_t l = 0;
ssize_t r = len - 1;
uint32_t hash = 0;
/* TODO: Change the array checksum to be 16 bit long. */