| /* |
| * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. |
| * |
| * Copyright (C) 2002-2011 Aleph One Ltd. |
| * for Toby Churchill Ltd and Brightstar Engineering |
| * |
| * Created by Charles Manning <charles@aleph1.co.uk> |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| */ |
| |
| #include "yportenv.h" |
| #include "yaffs_trace.h" |
| |
| #include "yaffs_guts.h" |
| #include "yaffs_getblockinfo.h" |
| #include "yaffs_tagscompat.h" |
| #include "yaffs_nand.h" |
| #include "yaffs_yaffs1.h" |
| #include "yaffs_yaffs2.h" |
| #include "yaffs_bitmap.h" |
| #include "yaffs_verify.h" |
| #include "yaffs_packedtags2.h" |
| #include "yaffs_nameval.h" |
| #include "yaffs_allocator.h" |
| #include "yaffs_attribs.h" |
#include "yaffs_summary.h"
#include "yaffs_ecc.h"

/* Note: YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
#define YAFFS_GC_GOOD_ENOUGH 2
#define YAFFS_GC_PASSIVE_THRESHOLD 4
| |
| /* Forward declarations */ |
| |
| static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, |
| const u8 *buffer, int n_bytes, int use_reserve); |
| |
| |
| |
/* Function to calculate the chunk number and byte offset within the chunk
 * for a given file address.
 */
| |
| void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr, |
| int *chunk_out, u32 *offset_out) |
| { |
| int chunk; |
| u32 offset; |
| |
| chunk = (u32) (addr >> dev->chunk_shift); |
| |
| if (dev->chunk_div == 1) { |
| /* easy power of 2 case */ |
| offset = (u32) (addr & dev->chunk_mask); |
| } else { |
| /* Non power-of-2 case */ |
| |
| loff_t chunk_base; |
| |
| chunk /= dev->chunk_div; |
| |
| chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk; |
| offset = (u32) (addr - chunk_base); |
| } |
| |
| *chunk_out = chunk; |
| *offset_out = offset; |
| } |
| |
| /* Function to return the number of shifts for a power of 2 greater than or |
| * equal to the given number |
| * Note we don't try to cater for all possible numbers and this does not have to |
| * be hellishly efficient. |
| */ |
| |
| static inline u32 calc_shifts_ceiling(u32 x) |
| { |
| int extra_bits; |
| int shifts; |
| |
| shifts = extra_bits = 0; |
| |
| while (x > 1) { |
| if (x & 1) |
| extra_bits++; |
| x >>= 1; |
| shifts++; |
| } |
| |
| if (extra_bits) |
| shifts++; |
| |
| return shifts; |
| } |
| |
| /* Function to return the number of shifts to get a 1 in bit 0 |
| */ |
| |
| static inline u32 calc_shifts(u32 x) |
| { |
| u32 shifts; |
| |
| shifts = 0; |
| |
| if (!x) |
| return 0; |
| |
| while (!(x & 1)) { |
| x >>= 1; |
| shifts++; |
| } |
| |
| return shifts; |
| } |
| |
| /* |
| * Temporary buffer manipulations. |
| */ |
| |
| static int yaffs_init_tmp_buffers(struct yaffs_dev *dev) |
| { |
| int i; |
	u8 *buf = (u8 *) 1;	/* non-NULL sentinel so the loop runs at least once */
| |
| memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer)); |
| |
| for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) { |
| dev->temp_buffer[i].in_use = 0; |
| buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); |
| dev->temp_buffer[i].buffer = buf; |
| } |
| |
| return buf ? YAFFS_OK : YAFFS_FAIL; |
| } |
| |
u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev)
| { |
| int i; |
| |
| dev->temp_in_use++; |
| if (dev->temp_in_use > dev->max_temp) |
| dev->max_temp = dev->temp_in_use; |
| |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| if (dev->temp_buffer[i].in_use == 0) { |
| dev->temp_buffer[i].in_use = 1; |
| return dev->temp_buffer[i].buffer; |
| } |
| } |
| |
| yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers"); |
| /* |
| * If we got here then we have to allocate an unmanaged one |
| * This is not good. |
| */ |
| |
| dev->unmanaged_buffer_allocs++; |
| return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS); |
| |
| } |
| |
| void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer) |
| { |
| int i; |
| |
| dev->temp_in_use--; |
| |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| if (dev->temp_buffer[i].buffer == buffer) { |
| dev->temp_buffer[i].in_use = 0; |
| return; |
| } |
| } |
| |
| if (buffer) { |
| /* assume it is an unmanaged one. */ |
| yaffs_trace(YAFFS_TRACE_BUFFERS, |
| "Releasing unmanaged temp buffer"); |
| kfree(buffer); |
| dev->unmanaged_buffer_deallocs++; |
| } |
| |
| } |
| |
| /* |
| * Determine if we have a managed buffer. |
| */ |
| int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 *buffer) |
| { |
| int i; |
| |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| if (dev->temp_buffer[i].buffer == buffer) |
| return 1; |
| } |
| |
| for (i = 0; i < dev->param.n_caches; i++) { |
| if (dev->cache[i].data == buffer) |
| return 1; |
| } |
| |
| if (buffer == dev->checkpt_buffer) |
| return 1; |
| |
	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs: unmanaged buffer detected.");
| return 0; |
| } |
| |
| /* |
| * Functions for robustisizing TODO |
| * |
| */ |
| |
| static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk, |
| const u8 *data, |
| const struct yaffs_ext_tags *tags) |
| { |
| } |
| |
| static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk, |
| const struct yaffs_ext_tags *tags) |
| { |
| } |
| |
| void yaffs_handle_chunk_error(struct yaffs_dev *dev, |
| struct yaffs_block_info *bi) |
| { |
| if (!bi->gc_prioritise) { |
| bi->gc_prioritise = 1; |
| dev->has_pending_prioritised_gc = 1; |
| bi->chunk_error_strikes++; |
| |
| if (bi->chunk_error_strikes > 3) { |
			bi->needs_retiring = 1;	/* Too many strikes, so retire */
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "yaffs: Block struck out"); |
| |
| } |
| } |
| } |
| |
| static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk, |
| int erased_ok) |
| { |
| int flash_block = nand_chunk / dev->param.chunks_per_block; |
| struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block); |
| |
| yaffs_handle_chunk_error(dev, bi); |
| |
| if (erased_ok) { |
| /* Was an actual write failure, |
| * so mark the block for retirement.*/ |
| bi->needs_retiring = 1; |
| yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, |
| "**>> Block %d needs retiring", flash_block); |
| } |
| |
| /* Delete the chunk */ |
| yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); |
| yaffs_skip_rest_of_block(dev); |
| } |
| |
| /* |
| * Verification code |
| */ |
| |
| /* |
| * Simple hash function. Needs to have a reasonable spread |
| */ |
| |
| static inline int yaffs_hash_fn(int n) |
| { |
| if (n < 0) |
| n = -n; |
| return n % YAFFS_NOBJECT_BUCKETS; |
| } |
| |
| /* |
| * Access functions to useful fake objects. |
| * Note that root might have a presence in NAND if permissions are set. |
| */ |
| |
| struct yaffs_obj *yaffs_root(struct yaffs_dev *dev) |
| { |
| return dev->root_dir; |
| } |
| |
| struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev) |
| { |
| return dev->lost_n_found; |
| } |
| |
| /* |
| * Erased NAND checking functions |
| */ |
| |
| int yaffs_check_ff(u8 *buffer, int n_bytes) |
| { |
| /* Horrible, slow implementation */ |
| while (n_bytes--) { |
| if (*buffer != 0xff) |
| return 0; |
| buffer++; |
| } |
| return 1; |
| } |
| |
| static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk) |
| { |
| int retval = YAFFS_OK; |
| u8 *data = yaffs_get_temp_buffer(dev); |
| struct yaffs_ext_tags tags; |
| |
| yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags); |
| |
| if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR) |
| retval = YAFFS_FAIL; |
| |
| if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) || |
| tags.chunk_used) { |
| yaffs_trace(YAFFS_TRACE_NANDACCESS, |
| "Chunk %d not erased", nand_chunk); |
| retval = YAFFS_FAIL; |
| } |
| |
| yaffs_release_temp_buffer(dev, data); |
| |
| return retval; |
| |
| } |
| |
| static int yaffs_verify_chunk_written(struct yaffs_dev *dev, |
| int nand_chunk, |
| const u8 *data, |
| struct yaffs_ext_tags *tags) |
| { |
| int retval = YAFFS_OK; |
| struct yaffs_ext_tags temp_tags; |
| u8 *buffer = yaffs_get_temp_buffer(dev); |
| |
| yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags); |
| if (memcmp(buffer, data, dev->data_bytes_per_chunk) || |
| temp_tags.obj_id != tags->obj_id || |
| temp_tags.chunk_id != tags->chunk_id || |
| temp_tags.n_bytes != tags->n_bytes) |
| retval = YAFFS_FAIL; |
| |
| yaffs_release_temp_buffer(dev, buffer); |
| |
| return retval; |
| } |
| |
| |
| int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks) |
| { |
| int reserved_chunks; |
| int reserved_blocks = dev->param.n_reserved_blocks; |
| int checkpt_blocks; |
| |
| checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev); |
| |
| reserved_chunks = |
| (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block; |
| |
| return (dev->n_free_chunks > (reserved_chunks + n_chunks)); |
| } |
| |
| static int yaffs_find_alloc_block(struct yaffs_dev *dev) |
| { |
| int i; |
| struct yaffs_block_info *bi; |
| |
| if (dev->n_erased_blocks < 1) { |
		/* Hoosterman, we've got a problem.
| * Can't get space to gc |
| */ |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "yaffs tragedy: no more erased blocks"); |
| |
| return -1; |
| } |
| |
| /* Find an empty block. */ |
| |
| for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { |
| dev->alloc_block_finder++; |
| if (dev->alloc_block_finder < dev->internal_start_block |
| || dev->alloc_block_finder > dev->internal_end_block) { |
| dev->alloc_block_finder = dev->internal_start_block; |
| } |
| |
| bi = yaffs_get_block_info(dev, dev->alloc_block_finder); |
| |
| if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) { |
| bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING; |
| dev->seq_number++; |
| bi->seq_number = dev->seq_number; |
| dev->n_erased_blocks--; |
| yaffs_trace(YAFFS_TRACE_ALLOCATE, |
| "Allocated block %d, seq %d, %d left" , |
| dev->alloc_block_finder, dev->seq_number, |
| dev->n_erased_blocks); |
| return dev->alloc_block_finder; |
| } |
| } |
| |
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "yaffs tragedy: no more erased blocks, but there should have been %d", |
| dev->n_erased_blocks); |
| |
| return -1; |
| } |
| |
| static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver, |
| struct yaffs_block_info **block_ptr) |
| { |
| int ret_val; |
| struct yaffs_block_info *bi; |
| |
| if (dev->alloc_block < 0) { |
| /* Get next block to allocate off */ |
| dev->alloc_block = yaffs_find_alloc_block(dev); |
| dev->alloc_page = 0; |
| } |
| |
| if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) { |
| /* No space unless we're allowed to use the reserve. */ |
| return -1; |
| } |
| |
| if (dev->n_erased_blocks < dev->param.n_reserved_blocks |
| && dev->alloc_page == 0) |
| yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve"); |
| |
| /* Next page please.... */ |
| if (dev->alloc_block >= 0) { |
| bi = yaffs_get_block_info(dev, dev->alloc_block); |
| |
| ret_val = (dev->alloc_block * dev->param.chunks_per_block) + |
| dev->alloc_page; |
| bi->pages_in_use++; |
| yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page); |
| |
| dev->alloc_page++; |
| |
| dev->n_free_chunks--; |
| |
| /* If the block is full set the state to full */ |
| if (dev->alloc_page >= dev->param.chunks_per_block) { |
| bi->block_state = YAFFS_BLOCK_STATE_FULL; |
| dev->alloc_block = -1; |
| } |
| |
| if (block_ptr) |
| *block_ptr = bi; |
| |
| return ret_val; |
| } |
| |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!"); |
| |
| return -1; |
| } |
| |
| static int yaffs_get_erased_chunks(struct yaffs_dev *dev) |
| { |
| int n; |
| |
| n = dev->n_erased_blocks * dev->param.chunks_per_block; |
| |
| if (dev->alloc_block > 0) |
| n += (dev->param.chunks_per_block - dev->alloc_page); |
| |
| return n; |
| |
| } |
| |
| /* |
| * yaffs_skip_rest_of_block() skips over the rest of the allocation block |
| * if we don't want to write to it. |
| */ |
| void yaffs_skip_rest_of_block(struct yaffs_dev *dev) |
| { |
| struct yaffs_block_info *bi; |
| |
| if (dev->alloc_block > 0) { |
| bi = yaffs_get_block_info(dev, dev->alloc_block); |
| if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) { |
| bi->block_state = YAFFS_BLOCK_STATE_FULL; |
| dev->alloc_block = -1; |
| } |
| } |
| } |
| |
| static int yaffs_write_new_chunk(struct yaffs_dev *dev, |
| const u8 *data, |
| struct yaffs_ext_tags *tags, int use_reserver) |
| { |
| int attempts = 0; |
| int write_ok = 0; |
| int chunk; |
| |
| yaffs2_checkpt_invalidate(dev); |
| |
| do { |
		struct yaffs_block_info *bi = NULL;
| int erased_ok = 0; |
| |
| chunk = yaffs_alloc_chunk(dev, use_reserver, &bi); |
| if (chunk < 0) { |
| /* no space */ |
| break; |
| } |
| |
| /* First check this chunk is erased, if it needs |
| * checking. The checking policy (unless forced |
| * always on) is as follows: |
| * |
| * Check the first page we try to write in a block. |
| * If the check passes then we don't need to check any |
| * more. If the check fails, we check again... |
| * If the block has been erased, we don't need to check. |
| * |
| * However, if the block has been prioritised for gc, |
| * then we think there might be something odd about |
| * this block and stop using it. |
| * |
| * Rationale: We should only ever see chunks that have |
| * not been erased if there was a partially written |
| * chunk due to power loss. This checking policy should |
| * catch that case with very few checks and thus save a |
| * lot of checks that are most likely not needed. |
| * |
| * Mods to the above |
| * If an erase check fails or the write fails we skip the |
| * rest of the block. |
| */ |
| |
| /* let's give it a try */ |
| attempts++; |
| |
| if (dev->param.always_check_erased) |
| bi->skip_erased_check = 0; |
| |
| if (!bi->skip_erased_check) { |
| erased_ok = yaffs_check_chunk_erased(dev, chunk); |
| if (erased_ok != YAFFS_OK) { |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "**>> yaffs chunk %d was not erased", |
| chunk); |
| |
| /* If not erased, delete this one, |
| * skip rest of block and |
| * try another chunk */ |
| yaffs_chunk_del(dev, chunk, 1, __LINE__); |
| yaffs_skip_rest_of_block(dev); |
| continue; |
| } |
| } |
| |
| write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags); |
| |
| if (!bi->skip_erased_check) |
| write_ok = |
| yaffs_verify_chunk_written(dev, chunk, data, tags); |
| |
| if (write_ok != YAFFS_OK) { |
| /* Clean up aborted write, skip to next block and |
| * try another chunk */ |
| yaffs_handle_chunk_wr_error(dev, chunk, erased_ok); |
| continue; |
| } |
| |
| bi->skip_erased_check = 1; |
| |
| /* Copy the data into the robustification buffer */ |
| yaffs_handle_chunk_wr_ok(dev, chunk, data, tags); |
| |
| } while (write_ok != YAFFS_OK && |
| (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts)); |
| |
| if (!write_ok) |
| chunk = -1; |
| |
| if (attempts > 1) { |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "**>> yaffs write required %d attempts", |
| attempts); |
| dev->n_retried_writes += (attempts - 1); |
| } |
| |
| return chunk; |
| } |
| |
| /* |
| * Block retiring for handling a broken block. |
| */ |
| |
| static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block) |
| { |
| struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block); |
| |
| yaffs2_checkpt_invalidate(dev); |
| |
| yaffs2_clear_oldest_dirty_seq(dev, bi); |
| |
| if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) { |
| if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) { |
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "yaffs: Failed to mark bad and erase block %d", |
| flash_block); |
| } else { |
| struct yaffs_ext_tags tags; |
| int chunk_id = |
| flash_block * dev->param.chunks_per_block; |
| |
| u8 *buffer = yaffs_get_temp_buffer(dev); |
| |
| memset(buffer, 0xff, dev->data_bytes_per_chunk); |
| memset(&tags, 0, sizeof(tags)); |
| tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK; |
| if (dev->param.write_chunk_tags_fn(dev, chunk_id - |
| dev->chunk_offset, |
| buffer, |
| &tags) != YAFFS_OK) |
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "yaffs: Failed to write bad block marker to block %d", |
| flash_block); |
| |
| yaffs_release_temp_buffer(dev, buffer); |
| } |
| } |
| |
| bi->block_state = YAFFS_BLOCK_STATE_DEAD; |
| bi->gc_prioritise = 0; |
| bi->needs_retiring = 0; |
| |
| dev->n_retired_blocks++; |
| } |
| |
| /*---------------- Name handling functions ------------*/ |
| |
| static u16 yaffs_calc_name_sum(const YCHAR *name) |
| { |
| u16 sum = 0; |
| u16 i = 1; |
| |
| if (!name) |
| return 0; |
| |
| while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) { |
| |
| /* 0x1f mask is case insensitive */ |
| sum += ((*name) & 0x1f) * i; |
| i++; |
| name++; |
| } |
| return sum; |
| } |
| |
| void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name) |
| { |
| memset(obj->short_name, 0, sizeof(obj->short_name)); |
| if (name && |
| yaffs_strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <= |
| YAFFS_SHORT_NAME_LENGTH) |
| yaffs_strcpy(obj->short_name, name); |
| else |
| obj->short_name[0] = _Y('\0'); |
| obj->sum = yaffs_calc_name_sum(name); |
| } |
| |
| void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj, |
| const struct yaffs_obj_hdr *oh) |
| { |
| #ifdef CONFIG_YAFFS_AUTO_UNICODE |
| YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1]; |
| memset(tmp_name, 0, sizeof(tmp_name)); |
| yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name, |
| YAFFS_MAX_NAME_LENGTH + 1); |
| yaffs_set_obj_name(obj, tmp_name); |
| #else |
| yaffs_set_obj_name(obj, oh->name); |
| #endif |
| } |
| |
| loff_t yaffs_max_file_size(struct yaffs_dev *dev) |
| { |
| return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk; |
| } |
| |
/*-------------------- TNODES -------------------
 *
 * List of spare tnodes.
 * The list is hooked together using the first pointer
 * in the tnode.
 */
| |
| struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev) |
| { |
| struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev); |
| |
| if (tn) { |
| memset(tn, 0, dev->tnode_size); |
| dev->n_tnodes++; |
| } |
| |
| dev->checkpoint_blocks_required = 0; /* force recalculation */ |
| |
| return tn; |
| } |
| |
/* yaffs_free_tnode frees up a tnode and puts it back on the free list */
| static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn) |
| { |
| yaffs_free_raw_tnode(dev, tn); |
| dev->n_tnodes--; |
| dev->checkpoint_blocks_required = 0; /* force recalculation */ |
| } |
| |
| static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev) |
| { |
| yaffs_deinit_raw_tnodes_and_objs(dev); |
| dev->n_obj = 0; |
| dev->n_tnodes = 0; |
| } |
| |
| void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn, |
| unsigned pos, unsigned val) |
| { |
| u32 *map = (u32 *) tn; |
| u32 bit_in_map; |
| u32 bit_in_word; |
| u32 word_in_map; |
| u32 mask; |
| |
| pos &= YAFFS_TNODES_LEVEL0_MASK; |
| val >>= dev->chunk_grp_bits; |
| |
| bit_in_map = pos * dev->tnode_width; |
| word_in_map = bit_in_map / 32; |
| bit_in_word = bit_in_map & (32 - 1); |
| |
| mask = dev->tnode_mask << bit_in_word; |
| |
| map[word_in_map] &= ~mask; |
| map[word_in_map] |= (mask & (val << bit_in_word)); |
| |
| if (dev->tnode_width > (32 - bit_in_word)) { |
| bit_in_word = (32 - bit_in_word); |
| word_in_map++; |
| mask = |
| dev->tnode_mask >> bit_in_word; |
| map[word_in_map] &= ~mask; |
| map[word_in_map] |= (mask & (val >> bit_in_word)); |
| } |
| } |
| |
| u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn, |
| unsigned pos) |
| { |
| u32 *map = (u32 *) tn; |
| u32 bit_in_map; |
| u32 bit_in_word; |
| u32 word_in_map; |
| u32 val; |
| |
| pos &= YAFFS_TNODES_LEVEL0_MASK; |
| |
| bit_in_map = pos * dev->tnode_width; |
| word_in_map = bit_in_map / 32; |
| bit_in_word = bit_in_map & (32 - 1); |
| |
| val = map[word_in_map] >> bit_in_word; |
| |
| if (dev->tnode_width > (32 - bit_in_word)) { |
| bit_in_word = (32 - bit_in_word); |
| word_in_map++; |
| val |= (map[word_in_map] << bit_in_word); |
| } |
| |
| val &= dev->tnode_mask; |
| val <<= dev->chunk_grp_bits; |
| |
| return val; |
| } |
| |
| /* ------------------- End of individual tnode manipulation -----------------*/ |
| |
| /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------ |
| * The look up tree is represented by the top tnode and the number of top_level |
| * in the tree. 0 means only the level 0 tnode is in the tree. |
| */ |
| |
/* yaffs_find_tnode_0 finds the level 0 tnode, if one exists. */
| struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev, |
| struct yaffs_file_var *file_struct, |
| u32 chunk_id) |
| { |
| struct yaffs_tnode *tn = file_struct->top; |
| u32 i; |
| int required_depth; |
| int level = file_struct->top_level; |
| |
| /* Check sane level and chunk Id */ |
| if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) |
| return NULL; |
| |
| if (chunk_id > YAFFS_MAX_CHUNK_ID) |
| return NULL; |
| |
| /* First check we're tall enough (ie enough top_level) */ |
| |
| i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS; |
| required_depth = 0; |
| while (i) { |
| i >>= YAFFS_TNODES_INTERNAL_BITS; |
| required_depth++; |
| } |
| |
| if (required_depth > file_struct->top_level) |
| return NULL; /* Not tall enough, so we can't find it */ |
| |
| /* Traverse down to level 0 */ |
| while (level > 0 && tn) { |
| tn = tn->internal[(chunk_id >> |
| (YAFFS_TNODES_LEVEL0_BITS + |
| (level - 1) * |
| YAFFS_TNODES_INTERNAL_BITS)) & |
| YAFFS_TNODES_INTERNAL_MASK]; |
| level--; |
| } |
| |
| return tn; |
| } |
| |
/* yaffs_add_find_tnode_0 finds the level 0 tnode if it exists,
| * otherwise first expands the tree. |
| * This happens in two steps: |
| * 1. If the tree isn't tall enough, then make it taller. |
| * 2. Scan down the tree towards the level 0 tnode adding tnodes if required. |
| * |
| * Used when modifying the tree. |
| * |
| * If the tn argument is NULL, then a fresh tnode will be added otherwise the |
 * specified tn will be plugged into the tree.
| */ |
| |
| struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev, |
| struct yaffs_file_var *file_struct, |
| u32 chunk_id, |
| struct yaffs_tnode *passed_tn) |
| { |
| int required_depth; |
| int i; |
| int l; |
| struct yaffs_tnode *tn; |
| u32 x; |
| |
| /* Check sane level and page Id */ |
| if (file_struct->top_level < 0 || |
| file_struct->top_level > YAFFS_TNODES_MAX_LEVEL) |
| return NULL; |
| |
| if (chunk_id > YAFFS_MAX_CHUNK_ID) |
| return NULL; |
| |
| /* First check we're tall enough (ie enough top_level) */ |
| |
| x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS; |
| required_depth = 0; |
| while (x) { |
| x >>= YAFFS_TNODES_INTERNAL_BITS; |
| required_depth++; |
| } |
| |
| if (required_depth > file_struct->top_level) { |
| /* Not tall enough, gotta make the tree taller */ |
| for (i = file_struct->top_level; i < required_depth; i++) { |
| |
| tn = yaffs_get_tnode(dev); |
| |
| if (tn) { |
| tn->internal[0] = file_struct->top; |
| file_struct->top = tn; |
| file_struct->top_level++; |
| } else { |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "yaffs: no more tnodes"); |
| return NULL; |
| } |
| } |
| } |
| |
| /* Traverse down to level 0, adding anything we need */ |
| |
| l = file_struct->top_level; |
| tn = file_struct->top; |
| |
| if (l > 0) { |
| while (l > 0 && tn) { |
| x = (chunk_id >> |
| (YAFFS_TNODES_LEVEL0_BITS + |
| (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) & |
| YAFFS_TNODES_INTERNAL_MASK; |
| |
| if ((l > 1) && !tn->internal[x]) { |
| /* Add missing non-level-zero tnode */ |
| tn->internal[x] = yaffs_get_tnode(dev); |
| if (!tn->internal[x]) |
| return NULL; |
| } else if (l == 1) { |
| /* Looking from level 1 at level 0 */ |
| if (passed_tn) { |
| /* If we already have one, release it */ |
| if (tn->internal[x]) |
| yaffs_free_tnode(dev, |
| tn->internal[x]); |
| tn->internal[x] = passed_tn; |
| |
| } else if (!tn->internal[x]) { |
| /* Don't have one, none passed in */ |
| tn->internal[x] = yaffs_get_tnode(dev); |
| if (!tn->internal[x]) |
| return NULL; |
| } |
| } |
| |
| tn = tn->internal[x]; |
| l--; |
| } |
| } else { |
| /* top is level 0 */ |
| if (passed_tn) { |
| memcpy(tn, passed_tn, |
| (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8); |
| yaffs_free_tnode(dev, passed_tn); |
| } |
| } |
| |
| return tn; |
| } |
| |
| static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id, |
| int chunk_obj) |
| { |
| return (tags->chunk_id == chunk_obj && |
| tags->obj_id == obj_id && |
| !tags->is_deleted) ? 1 : 0; |
| |
| } |
| |
| static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk, |
| struct yaffs_ext_tags *tags, int obj_id, |
| int inode_chunk) |
| { |
| int j; |
| |
| for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) { |
| if (yaffs_check_chunk_bit |
| (dev, the_chunk / dev->param.chunks_per_block, |
| the_chunk % dev->param.chunks_per_block)) { |
| |
| if (dev->chunk_grp_size == 1) |
| return the_chunk; |
| else { |
| yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL, |
| tags); |
| if (yaffs_tags_match(tags, |
| obj_id, inode_chunk)) { |
| /* found it; */ |
| return the_chunk; |
| } |
| } |
| } |
| the_chunk++; |
| } |
| return -1; |
| } |
| |
| static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk, |
| struct yaffs_ext_tags *tags) |
| { |
	/* Get the tnode, then get the level 0 chunk offset */
| struct yaffs_tnode *tn; |
| int the_chunk = -1; |
| struct yaffs_ext_tags local_tags; |
| int ret_val = -1; |
| struct yaffs_dev *dev = in->my_dev; |
| |
| if (!tags) { |
| /* Passed a NULL, so use our own tags space */ |
| tags = &local_tags; |
| } |
| |
| tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk); |
| |
| if (!tn) |
| return ret_val; |
| |
| the_chunk = yaffs_get_group_base(dev, tn, inode_chunk); |
| |
| ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id, |
| inode_chunk); |
| return ret_val; |
| } |
| |
| static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk, |
| struct yaffs_ext_tags *tags) |
| { |
	/* Get the tnode, then get the level 0 chunk offset */
| struct yaffs_tnode *tn; |
| int the_chunk = -1; |
| struct yaffs_ext_tags local_tags; |
| struct yaffs_dev *dev = in->my_dev; |
| int ret_val = -1; |
| |
| if (!tags) { |
| /* Passed a NULL, so use our own tags space */ |
| tags = &local_tags; |
| } |
| |
| tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk); |
| |
| if (!tn) |
| return ret_val; |
| |
| the_chunk = yaffs_get_group_base(dev, tn, inode_chunk); |
| |
| ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id, |
| inode_chunk); |
| |
| /* Delete the entry in the filestructure (if found) */ |
| if (ret_val != -1) |
| yaffs_load_tnode_0(dev, tn, inode_chunk, 0); |
| |
| return ret_val; |
| } |
| |
| int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk, |
| int nand_chunk, int in_scan) |
| { |
| /* NB in_scan is zero unless scanning. |
| * For forward scanning, in_scan is > 0; |
| * for backward scanning in_scan is < 0 |
| * |
| * nand_chunk = 0 is a dummy insert to make sure the tnodes are there. |
| */ |
| |
| struct yaffs_tnode *tn; |
| struct yaffs_dev *dev = in->my_dev; |
	int existing_chunk;
| struct yaffs_ext_tags existing_tags; |
| struct yaffs_ext_tags new_tags; |
| unsigned existing_serial, new_serial; |
| |
| if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) { |
| /* Just ignore an attempt at putting a chunk into a non-file |
| * during scanning. |
		 * If it is not during scanning then something went wrong!
| */ |
| if (!in_scan) { |
			yaffs_trace(YAFFS_TRACE_ERROR,
				"yaffs tragedy: attempt to put data chunk into a non-file"
				);
| BUG(); |
| } |
| |
| yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); |
| return YAFFS_OK; |
| } |
| |
| tn = yaffs_add_find_tnode_0(dev, |
| &in->variant.file_variant, |
| inode_chunk, NULL); |
| if (!tn) |
| return YAFFS_FAIL; |
| |
| if (!nand_chunk) |
| /* Dummy insert, bail now */ |
| return YAFFS_OK; |
| |
	existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
| |
| if (in_scan != 0) { |
| /* If we're scanning then we need to test for duplicates |
| * NB This does not need to be efficient since it should only |
| * happen when the power fails during a write, then only one |
| * chunk should ever be affected. |
| * |
| * Correction for YAFFS2: This could happen quite a lot and we |
| * need to think about efficiency! TODO |
| * Update: For backward scanning we don't need to re-read tags |
| * so this is quite cheap. |
| */ |
| |
		if (existing_chunk > 0) {
| /* NB Right now existing chunk will not be real |
| * chunk_id if the chunk group size > 1 |
| * thus we have to do a FindChunkInFile to get the |
| * real chunk id. |
| * |
| * We have a duplicate now we need to decide which |
| * one to use: |
| * |
| * Backwards scanning YAFFS2: The old one is what |
| * we use, dump the new one. |
| * YAFFS1: Get both sets of tags and compare serial |
| * numbers. |
| */ |
| |
| if (in_scan > 0) { |
| /* Only do this for forward scanning */ |
| yaffs_rd_chunk_tags_nand(dev, |
| nand_chunk, |
| NULL, &new_tags); |
| |
| /* Do a proper find */ |
				existing_chunk =
				    yaffs_find_chunk_in_file(in, inode_chunk,
							     &existing_tags);
| } |
| |
			if (existing_chunk <= 0) {
				/* Hoosterman - how did this happen? */

				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs tragedy: existing chunk <= 0 in scan"
					);

			}
| |
| /* NB The deleted flags should be false, otherwise |
| * the chunks will not be loaded during a scan |
| */ |
| |
| if (in_scan > 0) { |
| new_serial = new_tags.serial_number; |
| existing_serial = existing_tags.serial_number; |
| } |
| |
			if ((in_scan > 0) &&
			    (existing_chunk <= 0 ||
			     ((existing_serial + 1) & 3) == new_serial)) {
| /* Forward scanning. |
| * Use new |
| * Delete the old one and drop through to |
| * update the tnode |
| */ |
				yaffs_chunk_del(dev, existing_chunk, 1,
						__LINE__);
| } else { |
| /* Backward scanning or we want to use the |
| * existing one |
| * Delete the new one and return early so that |
| * the tnode isn't changed |
| */ |
| yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); |
| return YAFFS_OK; |
| } |
| } |
| |
| } |
| |
	if (existing_chunk == 0)
| in->n_data_chunks++; |
| |
| yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk); |
| |
| return YAFFS_OK; |
| } |
| |
| static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk) |
| { |
| struct yaffs_block_info *the_block; |
| unsigned block_no; |
| |
| yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk); |
| |
| block_no = chunk / dev->param.chunks_per_block; |
| the_block = yaffs_get_block_info(dev, block_no); |
| if (the_block) { |
| the_block->soft_del_pages++; |
| dev->n_free_chunks++; |
| yaffs2_update_oldest_dirty_seq(dev, block_no, the_block); |
| } |
| } |
| |
/* yaffs_soft_del_worker scans backwards through the tnode tree and soft deletes all
| * the chunks in the file. |
| * All soft deleting does is increment the block's softdelete count and pulls |
| * the chunk out of the tnode. |
| * Thus, essentially this is the same as DeleteWorker except that the chunks |
| * are soft deleted. |
| */ |
| |
| static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn, |
| u32 level, int chunk_offset) |
| { |
| int i; |
| int the_chunk; |
| int all_done = 1; |
| struct yaffs_dev *dev = in->my_dev; |
| |
| if (!tn) |
| return 1; |
| |
| if (level > 0) { |
| for (i = YAFFS_NTNODES_INTERNAL - 1; |
| all_done && i >= 0; |
| i--) { |
| if (tn->internal[i]) { |
| all_done = |
| yaffs_soft_del_worker(in, |
| tn->internal[i], |
| level - 1, |
| (chunk_offset << |
| YAFFS_TNODES_INTERNAL_BITS) |
| + i); |
| if (all_done) { |
| yaffs_free_tnode(dev, |
| tn->internal[i]); |
| tn->internal[i] = NULL; |
| } else { |
| /* Can this happen? */ |
| } |
| } |
| } |
| return (all_done) ? 1 : 0; |
| } |
| |
| /* level 0 */ |
| for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) { |
| the_chunk = yaffs_get_group_base(dev, tn, i); |
| if (the_chunk) { |
| yaffs_soft_del_chunk(dev, the_chunk); |
| yaffs_load_tnode_0(dev, tn, i, 0); |
| } |
| } |
| return 1; |
| } |
| |
| static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj) |
| { |
| struct yaffs_dev *dev = obj->my_dev; |
| struct yaffs_obj *parent; |
| |
| yaffs_verify_obj_in_dir(obj); |
| parent = obj->parent; |
| |
| yaffs_verify_dir(parent); |
| |
| if (dev && dev->param.remove_obj_fn) |
| dev->param.remove_obj_fn(obj); |
| |
| list_del_init(&obj->siblings); |
| obj->parent = NULL; |
| |
| yaffs_verify_dir(parent); |
| } |
| |
| void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj) |
| { |
| if (!directory) { |
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "tragedy: Trying to add an object to a null pointer directory" |
| ); |
| BUG(); |
| return; |
| } |
| if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { |
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "tragedy: Trying to add an object to a non-directory" |
| ); |
| BUG(); |
| } |
| |
| if (obj->siblings.prev == NULL) { |
| /* Not initialised */ |
| BUG(); |
| } |
| |
| yaffs_verify_dir(directory); |
| |
| yaffs_remove_obj_from_dir(obj); |
| |
| /* Now add it */ |
| list_add(&obj->siblings, &directory->variant.dir_variant.children); |
| obj->parent = directory; |
| |
| if (directory == obj->my_dev->unlinked_dir |
| || directory == obj->my_dev->del_dir) { |
| obj->unlinked = 1; |
| obj->my_dev->n_unlinked_files++; |
| obj->rename_allowed = 0; |
| } |
| |
| yaffs_verify_dir(directory); |
| yaffs_verify_obj_in_dir(obj); |
| } |
| |
| static int yaffs_change_obj_name(struct yaffs_obj *obj, |
| struct yaffs_obj *new_dir, |
| const YCHAR *new_name, int force, int shadows) |
| { |
| int unlink_op; |
| int del_op; |
| struct yaffs_obj *existing_target; |
| |
| if (new_dir == NULL) |
| new_dir = obj->parent; /* use the old directory */ |
| |
| if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { |
| yaffs_trace(YAFFS_TRACE_ALWAYS, |
| "tragedy: yaffs_change_obj_name: new_dir is not a directory" |
| ); |
| BUG(); |
| } |
| |
| unlink_op = (new_dir == obj->my_dev->unlinked_dir); |
| del_op = (new_dir == obj->my_dev->del_dir); |
| |
| existing_target = yaffs_find_by_name(new_dir, new_name); |
| |
| /* If the object is a file going into the unlinked directory, |
| * then it is OK to just stuff it in since duplicate names are OK. |
| * else only proceed if the new name does not exist and we're putting |
| * it into a directory. |
| */ |
| if (!(unlink_op || del_op || force || |
| shadows > 0 || !existing_target) || |
| new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) |
| return YAFFS_FAIL; |
| |
| yaffs_set_obj_name(obj, new_name); |
| obj->dirty = 1; |
| yaffs_add_obj_to_dir(new_dir, obj); |
| |
| if (unlink_op) |
| obj->unlinked = 1; |
| |
| /* If it is a deletion then we mark it as a shrink for gc */ |
| if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0) |
| return YAFFS_OK; |
| |
| return YAFFS_FAIL; |
| } |
| |
| /*------------------------ Short Operations Cache ------------------------------ |
| * In many situations where there is no high level buffering a lot of |
| * reads might be short sequential reads, and a lot of writes may be short |
| * sequential writes. eg. scanning/writing a jpeg file. |
 * In these cases, a short read/write cache can provide a huge performance
| * benefit with dumb-as-a-rock code. |
| * In Linux, the page cache provides read buffering and the short op cache |
| * provides write buffering. |
| * |
| * There are a small number (~10) of cache chunks per device so that we don't |
| * need a very intelligent search. |
| */ |
| |
| static int yaffs_obj_cache_dirty(struct yaffs_obj *obj) |
| { |
| struct yaffs_dev *dev = obj->my_dev; |
| int i; |
| struct yaffs_cache *cache; |
| int n_caches = obj->my_dev->param.n_caches; |
| |
| for (i = 0; i < n_caches; i++) { |
| cache = &dev->cache[i]; |
| if (cache->object == obj && cache->dirty) |
| return 1; |
| } |
| |
| return 0; |
| } |
| |
| static void yaffs_flush_file_cache(struct yaffs_obj *obj) |
| { |
| struct yaffs_dev *dev = obj->my_dev; |
| int lowest = -99; /* Stop compiler whining. */ |
| int i; |
| struct yaffs_cache *cache; |
| int chunk_written = 0; |
| int n_caches = obj->my_dev->param.n_caches; |
| |
| if (n_caches < 1) |
| return; |
| do { |
| cache = NULL; |
| |
| /* Find the lowest dirty chunk for this object */ |
| for (i = 0; i < n_caches; i++) { |
| if (dev->cache[i].object == obj && |
| dev->cache[i].dirty) { |
| if (!cache || |
| dev->cache[i].chunk_id < lowest) { |
| cache = &dev->cache[i]; |
| lowest = cache->chunk_id; |
| } |
| } |
| } |
| |
| if (cache && !cache->locked) { |
| /* Write it out and free it up */ |
| chunk_written = |
| yaffs_wr_data_obj(cache->object, |
| cache->chunk_id, |
| cache->data, |
| cache->n_bytes, 1); |
| cache->dirty = 0; |
| cache->object = NULL; |
| } |
| } while (cache && chunk_written > 0); |
| |
| if (cache) |
| /* Hoosterman, disk full while writing cache out. */ |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "yaffs tragedy: no space during cache write"); |
| } |
| |
/* yaffs_flush_whole_cache(dev)
 *
 * Flush the cache of every object that has dirty cached chunks.
 */
| void yaffs_flush_whole_cache(struct yaffs_dev *dev) |
| { |
| struct yaffs_obj *obj; |
| int n_caches = dev->param.n_caches; |
| int i; |
| |
| /* Find a dirty object in the cache and flush it... |
| * until there are no further dirty objects. |
| */ |
| do { |
| obj = NULL; |
| for (i = 0; i < n_caches && !obj; i++) { |
| if (dev->cache[i].object && dev->cache[i].dirty) |
| obj = dev->cache[i].object; |
| } |
| if (obj) |
| yaffs_flush_file_cache(obj); |
| } while (obj); |
| |
| } |
| |
| /* Grab us a cache chunk for use. |
| * First look for an empty one. |
| * Then look for the least recently used non-dirty one. |
| * Then look for the least recently used dirty one...., flush and look again. |
| */ |
| static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev) |
| { |
| int i; |
| |
| if (dev->param.n_caches > 0) { |
| for (i = 0; i < dev->param.n_caches; i++) { |
| if (!dev->cache[i].object) |
| return &dev->cache[i]; |
| } |
| } |
| return NULL; |
| } |
| |
| static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev) |
| { |
| struct yaffs_cache *cache; |
| struct yaffs_obj *the_obj; |
| int usage; |
| int i; |
| |
| if (dev->param.n_caches < 1) |
| return NULL; |
| |
| /* Try find a non-dirty one... */ |
| |
| cache = yaffs_grab_chunk_worker(dev); |
| |
| if (!cache) { |
| /* They were all dirty, find the LRU object and flush |
| * its cache, then find again. |
| * NB what's here is not very accurate, |
| * we actually flush the object with the LRU chunk. |
| */ |
| |
		/* With locking we can't assume we can use entry zero,
		 * so set the_obj to a valid pointer for Coverity. */
| the_obj = dev->cache[0].object; |
| usage = -1; |
| cache = NULL; |
| |
| for (i = 0; i < dev->param.n_caches; i++) { |
| if (dev->cache[i].object && |
| !dev->cache[i].locked && |
| (dev->cache[i].last_use < usage || |
| !cache)) { |
| usage = dev->cache[i].last_use; |
| the_obj = dev->cache[i].object; |
| cache = &dev->cache[i]; |
| } |
| } |
| |
| if (!cache || cache->dirty) { |
| /* Flush and try again */ |
| yaffs_flush_file_cache(the_obj); |
| cache = yaffs_grab_chunk_worker(dev); |
| } |
| } |
| return cache; |
| } |
| |
| /* Find a cached chunk */ |
| static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj, |
| int chunk_id) |
| { |
| struct yaffs_dev *dev = obj->my_dev; |
| int i; |
| |
| if (dev->param.n_caches < 1) |
| return NULL; |
| |
| for (i = 0; i < dev->param.n_caches; i++) { |
| if (dev->cache[i].object == obj && |
| dev->cache[i].chunk_id == chunk_id) { |
| dev->cache_hits++; |
| |
| return &dev->cache[i]; |
| } |
| } |
| return NULL; |
| } |
| |
/* Mark the chunk for the least recently used algorithm */
| static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache, |
| int is_write) |
| { |
| int i; |
| |
| if (dev->param.n_caches < 1) |
| return; |
| |
| if (dev->cache_last_use < 0 || |
| dev->cache_last_use > 100000000) { |
| /* Reset the cache usages */ |
| for (i = 1; i < dev->param.n_caches; i++) |
| dev->cache[i].last_use = 0; |
| |
| dev->cache_last_use = 0; |
| } |
| dev->cache_last_use++; |
| cache->last_use = dev->cache_last_use; |
| |
| if (is_write) |
| cache->dirty = 1; |
| } |
| |
| /* Invalidate a single cache page. |
| * Do this when a whole page gets written, |
| * ie the short cache for this page is no longer valid. |
| */ |
| static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id) |
| { |
| struct yaffs_cache *cache; |
| |
| if (object->my_dev->param.n_caches > 0) { |
| cache = yaffs_find_chunk_cache(object, chunk_id); |
| |
| if (cache) |
| cache->object = NULL; |
| } |
| } |
| |
| /* Invalidate all the cache pages associated with this object |
 * Do this whenever the file is deleted or resized.
| */ |
| static void yaffs_invalidate_whole_cache(struct yaffs_obj *in) |
| { |
| int i; |
| struct yaffs_dev *dev = in->my_dev; |
| |
| if (dev->param.n_caches > 0) { |
| /* Invalidate it. */ |
| for (i = 0; i < dev->param.n_caches; i++) { |
| if (dev->cache[i].object == in) |
| dev->cache[i].object = NULL; |
| } |
| } |
| } |
| |
| static void yaffs_unhash_obj(struct yaffs_obj *obj) |
| { |
| int bucket; |
| struct yaffs_dev *dev = obj->my_dev; |
| |
| /* If it is still linked into the bucket list, free from the list */ |
| if (!list_empty(&obj->hash_link)) { |
| list_del_init(&obj->hash_link); |
| bucket = yaffs_hash_fn(obj->obj_id); |
| dev->obj_bucket[bucket].count--; |
| } |
| } |
| |
/* yaffs_free_obj frees up an object and puts it back on the free list */
| static void yaffs_free_obj(struct yaffs_obj *obj) |
| { |
| struct yaffs_dev *dev; |
| |
| if (!obj) { |
| BUG(); |
| return; |
| } |
| dev = obj->my_dev; |
| yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p", |
| obj, obj->my_inode); |
| if (obj->parent) |
| BUG(); |
| if (!list_empty(&obj->siblings)) |
| BUG(); |
| |
| if (obj->my_inode) { |
| /* We're still hooked up to a cached inode. |
| * Don't delete now, but mark for later deletion |
| */ |
| obj->defered_free = 1; |
| return; |
| } |
| |
| yaffs_unhash_obj(obj); |
| |
| yaffs_free_raw_obj(dev, obj); |
| dev->n_obj--; |
| dev->checkpoint_blocks_required = 0; /* force recalculation */ |
| } |
| |
| void yaffs_handle_defered_free(struct yaffs_obj *obj) |
| { |
| if (obj->defered_free) |
| yaffs_free_obj(obj); |
| } |
| |
| static int yaffs_generic_obj_del(struct yaffs_obj *in) |
| { |
	/* Invalidate the file's data in the cache, without flushing. */
| yaffs_invalidate_whole_cache(in); |
| |
| if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) { |
| /* Move to unlinked directory so we have a deletion record */ |
| yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0, |
| 0); |
| } |
| |
| yaffs_remove_obj_from_dir(in); |
| yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__); |
| in->hdr_chunk = 0; |
| |
| yaffs_free_obj(in); |
| return YAFFS_OK; |
| |
| } |
| |
| static void yaffs_soft_del_file(struct yaffs_obj *obj) |
| { |
| if (!obj->deleted || |
| obj->variant_type != YAFFS_OBJECT_TYPE_FILE || |
| obj->soft_del) |
| return; |
| |
| if (obj->n_data_chunks <= 0) { |
| /* Empty file with no duplicate object headers, |
| * just delete it immediately */ |
| yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top); |
| obj->variant.file_variant.top = NULL; |
| yaffs_trace(YAFFS_TRACE_TRACING, |
| "yaffs: Deleting empty file %d", |
| obj->obj_id); |
| yaffs_generic_obj_del(obj); |
| } else { |
| yaffs_soft_del_worker(obj, |
| obj->variant.file_variant.top, |
| obj->variant. |
| file_variant.top_level, 0); |
| obj->soft_del = 1; |
| } |
| } |
| |
| /* Pruning removes any part of the file structure tree that is beyond the |
| * bounds of the file (ie that does not point to chunks). |
| * |
| * A file should only get pruned when its size is reduced. |
| * |
| * Before pruning, the chunks must be pulled from the tree and the |
| * level 0 tnode entries must be zeroed out. |
| * Could also use this for file deletion, but that's probably better handled |
| * by a special case. |
| * |
| * This function is recursive. For levels > 0 the function is called again on |
| * any sub-tree. For level == 0 we just check if the sub-tree has data. |
| * If there is no data in a subtree then it is pruned. |
| */ |
| |
| static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev, |
| struct yaffs_tnode *tn, u32 level, |
| int del0) |
| { |
| int i; |
| int has_data; |
| |
| if (!tn) |
| return tn; |
| |
| has_data = 0; |
| |
| if (level > 0) { |
| for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) { |
| if (tn->internal[i]) { |
| tn->internal[i] = |
| yaffs_prune_worker(dev, |
| tn->internal[i], |
| level - 1, |
| (i == 0) ? del0 : 1); |
| } |
| |
| if (tn->internal[i]) |
| has_data++; |
| } |
| } else { |
| int tnode_size_u32 = dev->tnode_size / sizeof(u32); |
| u32 *map = (u32 *) tn; |
| |
| for (i = 0; !has_data && i < tnode_size_u32; i++) { |
| if (map[i]) |
| has_data++; |
| } |
| } |
| |
| if (has_data == 0 && del0) { |
| /* Free and return NULL */ |
| yaffs_free_tnode(dev, tn); |
| tn = NULL; |
| } |
| return tn; |
| } |
| |
| static int yaffs_prune_tree(struct yaffs_dev *dev, |
| struct yaffs_file_var *file_struct) |
| { |
| int i; |
| int has_data; |
| int done = 0; |
| struct yaffs_tnode *tn; |
| |
| if (file_struct->top_level < 1) |
| return YAFFS_OK; |
| |
| file_struct->top = |
| yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0); |
| |
	/* Now we have a tree with all the dataless branches NULL but
	 * the height is the same as it was.
	 * Let's see if we can trim internal tnodes to shorten the tree.
	 * We can do this if only the 0th element in the tnode is in use
	 * (ie all the others are NULL).
	 */
| |
| while (file_struct->top_level && !done) { |
| tn = file_struct->top; |
| |
| has_data = 0; |
| for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) { |
| if (tn->internal[i]) |
| has_data++; |
| } |
| |
| if (!has_data) { |
| file_struct->top = tn->internal[0]; |
| file_struct->top_level--; |
| yaffs_free_tnode(dev, tn); |
| } else { |
| done = 1; |
| } |
| } |
| |
| return YAFFS_OK; |
| } |
| |
| /*-------------------- End of File Structure functions.-------------------*/ |
| |
/* yaffs_alloc_empty_obj gets us a clean object. */
| static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev) |
| { |
| struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev); |
| |
| if (!obj) |
| return obj; |
| |
| dev->n_obj++; |
| |
| /* Now sweeten it up... */ |
| |
| memset(obj, 0, sizeof(struct yaffs_obj)); |
| obj->being_created = 1; |
| |
| obj->my_dev = dev; |
| obj->hdr_chunk = 0; |
| obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN; |
| INIT_LIST_HEAD(&(obj->hard_links)); |
| INIT_LIST_HEAD(&(obj->hash_link)); |
| INIT_LIST_HEAD(&obj->siblings); |
| |
| /* Now make the directory sane */ |
| if (dev->root_dir) { |
| obj->parent = dev->root_dir; |
| list_add(&(obj->siblings), |
| &dev->root_dir->variant.dir_variant.children); |
| } |
| |
| /* Add it to the lost and found directory. |
| * NB Can't put root or lost-n-found in lost-n-found so |
| * check if lost-n-found exists first |
| */ |
| if (dev->lost_n_found) |
| yaffs_add_obj_to_dir(dev->lost_n_found, obj); |
| |
| obj->being_created = 0; |
| |
| dev->checkpoint_blocks_required = 0; /* force recalculation */ |
| |
| return obj; |
| } |
| |
| static int yaffs_find_nice_bucket(struct yaffs_dev *dev) |
| { |
| int i; |
| int l = 999; |
| int lowest = 999999; |
| |
| /* Search for the shortest list or one that |
| * isn't too long. |
| */ |
| |
| for (i = 0; i < 10 && lowest > 4; i++) { |
| dev->bucket_finder++; |
| dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS; |
| if (dev->obj_bucket[dev->bucket_finder].count < lowest) { |
| lowest = dev->obj_bucket[dev->bucket_finder].count; |
| l = dev->bucket_finder; |
| } |
| } |
| |
| return l; |
| } |
| |
| static int yaffs_new_obj_id(struct yaffs_dev *dev) |
| { |
| int bucket = yaffs_find_nice_bucket(dev); |
| int found = 0; |
| struct list_head *i; |
| u32 n = (u32) bucket; |
| |
| /* Now find an object value that has not already been taken |
| * by scanning the list. |
| */ |
| |
| while (!found) { |
| found = 1; |
| n += YAFFS_NOBJECT_BUCKETS; |
| list_for_each(i, &dev->obj_bucket[bucket].list) { |
| /* If there is already one in the list */ |
| if (list_entry(i, struct yaffs_obj, |
| hash_link)->obj_id == n) { |
| found = 0; |
| break; |
| } |
| } |
| } |
| return n; |
| } |
| |
| static void yaffs_hash_obj(struct yaffs_obj *in) |
| { |
| int bucket = yaffs_hash_fn(in->obj_id); |
| struct yaffs_dev *dev = in->my_dev; |
| |
| list_add(&in->hash_link, &dev->obj_bucket[bucket].list); |
| dev->obj_bucket[bucket].count++; |
| } |
| |
| struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number) |
| { |
| int bucket = yaffs_hash_fn(number); |
| struct list_head *i; |
| struct yaffs_obj *in; |
| |
| list_for_each(i, &dev->obj_bucket[bucket].list) { |
| /* Look if it is in the list */ |
| in = list_entry(i, struct yaffs_obj, hash_link); |
| if (in->obj_id == number) { |
| /* Don't show if it is defered free */ |
| if (in->defered_free) |
| return NULL; |
| return in; |
| } |
| } |
| |
| return NULL; |
| } |
| |
| struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number, |
| enum yaffs_obj_type type) |
| { |
| struct yaffs_obj *the_obj = NULL; |
| struct yaffs_tnode *tn = NULL; |
| |
| if (number < 0) |
| number = yaffs_new_obj_id(dev); |
| |
| if (type == YAFFS_OBJECT_TYPE_FILE) { |
| tn = yaffs_get_tnode(dev); |
| if (!tn) |
| return NULL; |
| } |
| |
| the_obj = yaffs_alloc_empty_obj(dev); |
| if (!the_obj) { |
| if (tn) |
| yaffs_free_tnode(dev, tn); |
| return NULL; |
| } |
| |
| the_obj->fake = 0; |
| the_obj->rename_allowed = 1; |
| the_obj->unlink_allowed = 1; |
| the_obj->obj_id = number; |
| yaffs_hash_obj(the_obj); |
| the_obj->variant_type = type; |
| yaffs_load_current_time(the_obj, 1, 1); |
| |
| switch (type) { |
| case YAFFS_OBJECT_TYPE_FILE: |
| the_obj->variant.file_variant.file_size = 0; |
| the_obj->variant.file_variant.scanned_size = 0; |
| the_obj->variant.file_variant.shrink_size = |
| yaffs_max_file_size(dev); |
| the_obj->variant.file_variant.top_level = 0; |
| the_obj->variant.file_variant.top = tn; |
| break; |
| case YAFFS_OBJECT_TYPE_DIRECTORY: |
| INIT_LIST_HEAD(&the_obj->variant.dir_variant.children); |
| INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty); |
| break; |
| case YAFFS_OBJECT_TYPE_SYMLINK: |
| case YAFFS_OBJECT_TYPE_HARDLINK: |
| case YAFFS_OBJECT_TYPE_SPECIAL: |
| /* No action required */ |
| break; |
| case YAFFS_OBJECT_TYPE_UNKNOWN: |
		/* TODO: this should not happen */
| break; |
| } |
| return the_obj; |
| } |
| |
| static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev, |
| int number, u32 mode) |
| { |
| |
| struct yaffs_obj *obj = |
| yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY); |
| |
| if (!obj) |
| return NULL; |
| |
| obj->fake = 1; /* it is fake so it might not use NAND */ |
| obj->rename_allowed = 0; |
| obj->unlink_allowed = 0; |
| obj->deleted = 0; |
| obj->unlinked = 0; |
| obj->yst_mode = mode; |
| obj->my_dev = dev; |
| obj->hdr_chunk = 0; /* Not a valid chunk. */ |
| return obj; |
| |
| } |
| |
| |
| static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev) |
| { |
| int i; |
| |
| dev->n_obj = 0; |
| dev->n_tnodes = 0; |
| yaffs_init_raw_tnodes_and_objs(dev); |
| |
| for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { |
| INIT_LIST_HEAD(&dev->obj_bucket[i].list); |
| dev->obj_bucket[i].count = 0; |
| } |
| } |
| |
| struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev, |
| int number, |
| enum yaffs_obj_type type) |
| { |
| struct yaffs_obj *the_obj = NULL; |
| |
| if (number > 0) |
| the_obj = yaffs_find_by_number(dev, number); |
| |
| if (!the_obj) |
| the_obj = yaffs_new_obj(dev, number, type); |
| |
| return the_obj; |
| |
| } |
| |
| YCHAR *yaffs_clone_str(const YCHAR *str) |
| { |
| YCHAR *new_str = NULL; |
| int len; |
| |
| if (!str) |
| str = _Y(""); |
| |
| len = yaffs_strnlen(str, YAFFS_MAX_ALIAS_LENGTH); |
| new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS); |
| if (new_str) { |
| yaffs_strncpy(new_str, str, len); |
| new_str[len] = 0; |
| } |
| return new_str; |
| |
| } |
| /* |
| *yaffs_update_parent() handles fixing a directories mtime and ctime when a new |
| * link (ie. name) is created or deleted in the directory. |
| * |
| * ie. |
| * create dir/a : update dir's mtime/ctime |
| * rm dir/a: update dir's mtime/ctime |
| * modify dir/a: don't update dir's mtimme/ctime |
| * |
| * This can be handled immediately or defered. Defering helps reduce the number |
| * of updates when many files in a directory are changed within a brief period. |
| * |
| * If the directory updating is defered then yaffs_update_dirty_dirs must be |
| * called periodically. |
| */ |
| |
| static void yaffs_update_parent(struct yaffs_obj *obj) |
| { |
| struct yaffs_dev *dev; |
| |
| if (!obj) |
| return; |
| dev = obj->my_dev; |
| obj->dirty = 1; |
| yaffs_load_current_time(obj, 0, 1); |
| if (dev->param.defered_dir_update) { |
| struct list_head *link = &obj->variant.dir_variant.dirty; |
| |
| if (list_empty(link)) { |
| list_add(link, &dev->dirty_dirs); |
| yaffs_trace(YAFFS_TRACE_BACKGROUND, |
| "Added object %d to dirty directories", |
| obj->obj_id); |
| } |
| |
| } else { |
| yaffs_update_oh(obj, NULL, 0, 0, 0, NULL); |
| } |
| } |
| |
| void yaffs_update_dirty_dirs(struct yaffs_dev *dev) |
| { |
| struct list_head *link; |
| struct yaffs_obj *obj; |
| struct yaffs_dir_var *d_s; |
| union yaffs_obj_var *o_v; |
| |
| yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories"); |
| |
| while (!list_empty(&dev->dirty_dirs)) { |
| link = dev->dirty_dirs.next; |
| list_del_init(link); |
| |
| d_s = list_entry(link, struct yaffs_dir_var, dirty); |
| o_v = list_entry(d_s, union yaffs_obj_var, dir_variant); |
| obj = list_entry(o_v, struct yaffs_obj, variant); |
| |
| yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d", |
| obj->obj_id); |
| |
| if (obj->dirty) |
| yaffs_update_oh(obj, NULL, 0, 0, 0, NULL); |
| } |
| } |
| |
| /* |
| * Mknod (create) a new object. |
| * equiv_obj only has meaning for a hard link; |
| * alias_str only has meaning for a symlink. |
| * rdev only has meaning for devices (a subset of special objects) |
| */ |
| |
| static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type, |
| struct yaffs_obj *parent, |
| const YCHAR *name, |
| u32 mode, |
| u32 uid, |
| u32 gid, |
| struct yaffs_obj *equiv_obj, |
| const YCHAR *alias_str, u32 rdev) |
| { |
| struct yaffs_obj *in; |
| YCHAR *str = NULL; |
| struct yaffs_dev *dev = parent->my_dev; |
| |
| /* Check if the entry exists. |
| * If it does then fail the call since we don't want a dup. */ |
| if (yaffs_find_by_name(parent, name)) |
| return NULL; |
| |
| if (type == YAFFS_OBJECT_TYPE_SYMLINK) { |
| str = yaffs_clone_str(alias_str); |
| if (!str) |
| return NULL; |
| } |
| |
| in = yaffs_new_obj(dev, -1, type); |
| |
| if (!in) { |
| kfree(str); |
| return NULL; |
| } |
| |
| in->hdr_chunk = 0; |
| in->valid = 1; |
| in->variant_type = type; |
| |
| in->yst_mode = mode; |
| |
| yaffs_attribs_init(in, gid, uid, rdev); |
| |
| in->n_data_chunks = 0; |
| |
| yaffs_set_obj_name(in, name); |
| in->dirty = 1; |
| |
| yaffs_add_obj_to_dir(parent, in); |
| |
| in->my_dev = parent->my_dev; |
| |
| switch (type) { |
| case YAFFS_OBJECT_TYPE_SYMLINK: |
| in->variant.symlink_variant.alias = str; |
| break; |
| case YAFFS_OBJECT_TYPE_HARDLINK: |
| in->variant.hardlink_variant.equiv_obj = equiv_obj; |
| in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id; |
| list_add(&in->hard_links, &equiv_obj->hard_links); |
| break; |
| case YAFFS_OBJECT_TYPE_FILE: |
| case YAFFS_OBJECT_TYPE_DIRECTORY: |
| case YAFFS_OBJECT_TYPE_SPECIAL: |
| case YAFFS_OBJECT_TYPE_UNKNOWN: |
| /* do nothing */ |
| break; |
| } |
| |
| if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) { |
| /* Could not create the object header, fail */ |
| yaffs_del_obj(in); |
| in = NULL; |
| } |
| |
| if (in) |
| yaffs_update_parent(parent); |
| |
| return in; |
| } |
| |
| struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent, |
| const YCHAR *name, u32 mode, u32 uid, |
| u32 gid) |
| { |
| return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode, |
| uid, gid, NULL, NULL, 0); |
| } |
| |
| struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name, |
| u32 mode, u32 uid, u32 gid) |
| { |
| return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name, |
| mode, uid, gid, NULL, NULL, 0); |
| } |
| |
| struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent, |
| const YCHAR *name, u32 mode, u32 uid, |
| u32 gid, u32 rdev) |
| { |
| return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode, |
| uid, gid, NULL, NULL, rdev); |
| } |
| |
| struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent, |
| const YCHAR *name, u32 mode, u32 uid, |
| u32 gid, const YCHAR *alias) |
| { |
| return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode, |
| uid, gid, NULL, alias, 0); |
| } |
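| |
| /* |
| * Usage sketch for the creation wrappers above (illustrative only; the |
| * names and mode/uid/gid values are made-up examples): |
| * |
| * struct yaffs_obj *dir = yaffs_create_dir(parent, _Y("logs"), 0755, 0, 0); |
| * if (dir) |
| * yaffs_create_file(dir, _Y("log.txt"), 0644, 0, 0); |
| * |
| * Each wrapper forwards to yaffs_create_obj() with the fields that do not |
| * apply to its type (equiv_obj, alias_str, rdev) zeroed. |
| */ |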
| |
| /* yaffs_link_obj returns the equivalent object, or NULL on failure. */ |
| struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name, |
| struct yaffs_obj *equiv_obj) |
| { |
| /* Get the real object in case we were fed a hard link obj */ |
| equiv_obj = yaffs_get_equivalent_obj(equiv_obj); |
| |
| if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK, |
| parent, name, 0, 0, 0, |
| equiv_obj, NULL, 0)) |
| return equiv_obj; |
| |
| return NULL; |
| |
| } |
| |
| |
| |
| /*---------------------- Block Management and Page Allocation -------------*/ |
| |
| static void yaffs_deinit_blocks(struct yaffs_dev *dev) |
| { |
| if (dev->block_info_alt && dev->block_info) |
| vfree(dev->block_info); |
| else |
| kfree(dev->block_info); |
| |
| dev->block_info_alt = 0; |
| |
| dev->block_info = NULL; |
| |
| if (dev->chunk_bits_alt && dev->chunk_bits) |
| vfree(dev->chunk_bits); |
| else |
| kfree(dev->chunk_bits); |
| dev->chunk_bits_alt = 0; |
| dev->chunk_bits = NULL; |
| } |
| |
| static int yaffs_init_blocks(struct yaffs_dev *dev) |
| { |
| int n_blocks = dev->internal_end_block - dev->internal_start_block + 1; |
| |
| dev->block_info = NULL; |
| dev->chunk_bits = NULL; |
| dev->alloc_block = -1; /* force it to get a new one */ |
| |
| /* If the first allocation strategy fails, try the alternate one */ |
| dev->block_info = |
| kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS); |
| if (!dev->block_info) { |
| dev->block_info = |
| vmalloc(n_blocks * sizeof(struct yaffs_block_info)); |
| dev->block_info_alt = 1; |
| } else { |
| dev->block_info_alt = 0; |
| } |
| |
| if (!dev->block_info) |
| goto alloc_error; |
| |
| /* Set up dynamic blockinfo stuff. Round up bytes. */ |
| dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8; |
| dev->chunk_bits = |
| kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS); |
| if (!dev->chunk_bits) { |
| dev->chunk_bits = |
| vmalloc(dev->chunk_bit_stride * n_blocks); |
| dev->chunk_bits_alt = 1; |
| } else { |
| dev->chunk_bits_alt = 0; |
| } |
| if (!dev->chunk_bits) |
| goto alloc_error; |
| |
| |
| memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info)); |
| memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks); |
| return YAFFS_OK; |
| |
| alloc_error: |
| yaffs_deinit_blocks(dev); |
| return YAFFS_FAIL; |
| } |
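| |
| /* |
| * Worked example of the sizing above (illustrative numbers): with |
| * chunks_per_block = 64, chunk_bit_stride = (64 + 7) / 8 = 8 bytes per |
| * block, so a 1024-block partition needs an 8 kB chunk-use bitmap plus |
| * 1024 * sizeof(struct yaffs_block_info) bytes of block info. |
| */ |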
| |
| |
| void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no) |
| { |
| struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no); |
| int erased_ok = 0; |
| int i; |
| |
| /* If the block is still healthy erase it and mark as clean. |
| * If the block has had a data failure, then retire it. |
| */ |
| |
| yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE, |
| "yaffs_block_became_dirty block %d state %d %s", |
| block_no, bi->block_state, |
| (bi->needs_retiring) ? "needs retiring" : ""); |
| |
| yaffs2_clear_oldest_dirty_seq(dev, bi); |
| |
| bi->block_state = YAFFS_BLOCK_STATE_DIRTY; |
| |
| /* If this is the block being garbage collected then stop gc'ing */ |
| if (block_no == dev->gc_block) |
| dev->gc_block = 0; |
| |
| /* If this block is currently the best candidate for gc |
| * then drop as a candidate */ |
| if (block_no == dev->gc_dirtiest) { |
| dev->gc_dirtiest = 0; |
| dev->gc_pages_in_use = 0; |
| } |
| |
| if (!bi->needs_retiring) { |
| yaffs2_checkpt_invalidate(dev); |
| erased_ok = yaffs_erase_block(dev, block_no); |
| if (!erased_ok) { |
| dev->n_erase_failures++; |
| yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, |
| "**>> Erasure failed %d", block_no); |
| } |
| } |
| |
| /* Verify erasure if needed */ |
| if (erased_ok && |
| ((yaffs_trace_mask & YAFFS_TRACE_ERASE) || |
| !yaffs_skip_verification(dev))) { |
| for (i = 0; i < dev->param.chunks_per_block; i++) { |
| if (!yaffs_check_chunk_erased(dev, |
| block_no * dev->param.chunks_per_block + i)) { |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| ">>Block %d erasure supposedly OK, but chunk %d not erased", |
| block_no, i); |
| } |
| } |
| } |
| |
| if (!erased_ok) { |
| /* We lost a block of free space */ |
| dev->n_free_chunks -= dev->param.chunks_per_block; |
| yaffs_retire_block(dev, block_no); |
| yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, |
| "**>> Block %d retired", block_no); |
| return; |
| } |
| |
| /* Clean it up... */ |
| bi->block_state = YAFFS_BLOCK_STATE_EMPTY; |
| bi->seq_number = 0; |
| dev->n_erased_blocks++; |
| bi->pages_in_use = 0; |
| bi->soft_del_pages = 0; |
| bi->has_shrink_hdr = 0; |
| bi->skip_erased_check = 1; /* Clean, so no need to check */ |
| bi->gc_prioritise = 0; |
| bi->has_summary = 0; |
| |
| yaffs_clear_chunk_bits(dev, block_no); |
| |
| yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no); |
| } |
| |
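| /* |
| * yaffs_gc_process_chunk() handles one live chunk during collection: read |
| * the chunk and its tags, find the owning object, then either discard the |
| * chunk (a data chunk of a soft-deleted file) or copy it to a new location |
| * (live data, or an object header scrubbed of shrink/shadow state first). |
| * The old chunk is deleted once processing succeeds. |
| */ |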
| static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev, |
| struct yaffs_block_info *bi, |
| int old_chunk, u8 *buffer) |
| { |
| int new_chunk; |
| int mark_flash = 1; |
| struct yaffs_ext_tags tags; |
| struct yaffs_obj *object; |
| int matching_chunk; |
| int ret_val = YAFFS_OK; |
| |
| memset(&tags, 0, sizeof(tags)); |
| yaffs_rd_chunk_tags_nand(dev, old_chunk, |
| buffer, &tags); |
| object = yaffs_find_by_number(dev, tags.obj_id); |
| |
| yaffs_trace(YAFFS_TRACE_GC_DETAIL, |
| "Collecting chunk in block %d, %d %d %d ", |
| dev->gc_chunk, tags.obj_id, |
| tags.chunk_id, tags.n_bytes); |
| |
| if (object && !yaffs_skip_verification(dev)) { |
| if (tags.chunk_id == 0) |
| matching_chunk = |
| object->hdr_chunk; |
| else if (object->soft_del) |
| /* Defeat the test */ |
| matching_chunk = old_chunk; |
| else |
| matching_chunk = |
| yaffs_find_chunk_in_file |
| (object, tags.chunk_id, |
| NULL); |
| |
| if (old_chunk != matching_chunk) |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "gc: page in gc mismatch: %d %d %d %d", |
| old_chunk, |
| matching_chunk, |
| tags.obj_id, |
| tags.chunk_id); |
| } |
| |
| if (!object) { |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "page %d in gc has no object: %d %d %d ", |
| old_chunk, |
| tags.obj_id, tags.chunk_id, |
| tags.n_bytes); |
| } |
| |
| if (object && |
| object->deleted && |
| object->soft_del && tags.chunk_id != 0) { |
| /* Data chunk in a soft-deleted file: |
| * throw it away. No need to copy it, |
| * just forget about it and fix up the |
| * object. |
| */ |
| |
| /* The free chunk count already includes |
| * soft-deleted chunks; however, this |
| * chunk will soon be really deleted, |
| * which will increment free chunks. |
| * Decrement free chunks here so the |
| * accounting works out properly. |
| */ |
| dev->n_free_chunks--; |
| bi->soft_del_pages--; |
| |
| object->n_data_chunks--; |
| if (object->n_data_chunks <= 0) { |
| /* remember to clean up this object */ |
| dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id; |
| dev->n_clean_ups++; |
| } |
| mark_flash = 0; |
| } else if (object) { |
| /* It's either a data chunk in a live |
| * file or an ObjectHeader, so we're |
| * interested in it. |
| * NB We need to keep the ObjectHeaders |
| * of deleted files until the whole file |
| * has been deleted. |
| */ |
| tags.serial_number++; |
| dev->n_gc_copies++; |
| |
| if (tags.chunk_id == 0) { |
| /* It is an object header. |
| * We need to nuke the |
| * shrink-header flags since |
| * their work is done. |
| * Also need to clean up |
| * shadowing. |
| */ |
| struct yaffs_obj_hdr *oh; |
| oh = (struct yaffs_obj_hdr *) buffer; |
| |
| oh->is_shrink = 0; |
| tags.extra_is_shrink = 0; |
| oh->shadows_obj = 0; |
| oh->inband_shadowed_obj_id = 0; |
| tags.extra_shadows = 0; |
| |
| /* Update file size */ |
| if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) { |
| yaffs_oh_size_load(oh, |
| object->variant.file_variant.file_size); |
| tags.extra_file_size = |
| object->variant.file_variant.file_size; |
| } |
| |
| yaffs_verify_oh(object, oh, &tags, 1); |
| new_chunk = |
| yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1); |
| } else { |
| new_chunk = |
| yaffs_write_new_chunk(dev, buffer, &tags, 1); |
| } |
| |
| if (new_chunk < 0) { |
| ret_val = YAFFS_FAIL; |
| } else { |
| |
| /* Now fix up the Tnodes etc. */ |
| |
| if (tags.chunk_id == 0) { |
| /* It's a header */ |
| object->hdr_chunk = new_chunk; |
| object->serial = tags.serial_number; |
| } else { |
| /* It's a data chunk */ |
| yaffs_put_chunk_in_file(object, tags.chunk_id, |
| new_chunk, 0); |
| } |
| } |
| } |
| if (ret_val == YAFFS_OK) |
| yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__); |
| return ret_val; |
| } |
| |
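| /* |
| * yaffs_gc_block() collects one block. A whole-block collection copies |
| * every live chunk in a single call; otherwise at most 5 chunks are copied |
| * per call so the work can be spread across many writes. gc is disabled |
| * around the copy loop to avoid recursive collection. |
| */ |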
| static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block) |
| { |
| int old_chunk; |
| int ret_val = YAFFS_OK; |
| int i; |
| int is_checkpt_block; |
| int max_copies; |
| int chunks_before = yaffs_get_erased_chunks(dev); |
| int chunks_after; |
| struct yaffs_block_info *bi = yaffs_get_block_info(dev, block); |
| |
| is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT); |
| |
| yaffs_trace(YAFFS_TRACE_TRACING, |
| "Collecting block %d, in use %d, shrink %d, whole_block %d", |
| block, bi->pages_in_use, bi->has_shrink_hdr, |
| whole_block); |
| |
| /*yaffs_verify_free_chunks(dev); */ |
| |
| if (bi->block_state == YAFFS_BLOCK_STATE_FULL) |
| bi->block_state = YAFFS_BLOCK_STATE_COLLECTING; |
| |
| bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */ |
| |
| dev->gc_disable = 1; |
| |
| yaffs_summary_gc(dev, block); |
| |
| if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) { |
| yaffs_trace(YAFFS_TRACE_TRACING, |
| "Collecting block %d that has no chunks in use", |
| block); |
| yaffs_block_became_dirty(dev, block); |
| } else { |
| |
| u8 *buffer = yaffs_get_temp_buffer(dev); |
| |
| yaffs_verify_blk(dev, bi, block); |
| |
| max_copies = (whole_block) ? dev->param.chunks_per_block : 5; |
| old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk; |
| |
| for (/* init already done */ ; |
| ret_val == YAFFS_OK && |
| dev->gc_chunk < dev->param.chunks_per_block && |
| (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) && |
| max_copies > 0; |
| dev->gc_chunk++, old_chunk++) { |
| if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) { |
| /* Page is in use and might need to be copied */ |
| max_copies--; |
| ret_val = yaffs_gc_process_chunk(dev, bi, |
| old_chunk, buffer); |
| } |
| } |
| yaffs_release_temp_buffer(dev, buffer); |
| } |
| |
| yaffs_verify_collected_blk(dev, bi, block); |
| |
| if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { |
| /* |
| * The gc did not complete. Set block state back to FULL |
| * because checkpointing does not restore gc. |
| */ |
| bi->block_state = YAFFS_BLOCK_STATE_FULL; |
| } else { |
| /* The gc completed. */ |
| /* Do any required cleanups */ |
| for (i = 0; i < dev->n_clean_ups; i++) { |
| /* Time to delete the file too */ |
| struct yaffs_obj *object = |
| yaffs_find_by_number(dev, dev->gc_cleanup_list[i]); |
| if (object) { |
| yaffs_free_tnode(dev, |
| object->variant.file_variant.top); |
| object->variant.file_variant.top = NULL; |
| yaffs_trace(YAFFS_TRACE_GC, |
| "yaffs: About to finally delete object %d", |
| object->obj_id); |
| yaffs_generic_obj_del(object); |
| object->my_dev->n_deleted_files--; |
| } |
| |
| } |
| chunks_after = yaffs_get_erased_chunks(dev); |
| if (chunks_before >= chunks_after) |
| yaffs_trace(YAFFS_TRACE_GC, |
| "gc did not increase free chunks before %d after %d", |
| chunks_before, chunks_after); |
| dev->gc_block = 0; |
| dev->gc_chunk = 0; |
| dev->n_clean_ups = 0; |
| } |
| |
| dev->gc_disable = 0; |
| |
| return ret_val; |
| } |
| |
| /* |
| * yaffs_find_gc_block() selects the dirtiest block (or close enough) |
| * for garbage collection. |
| */ |
| |
| static unsigned yaffs_find_gc_block(struct yaffs_dev *dev, |
| int aggressive, int background) |
| { |
| int i; |
| int iterations; |
| unsigned selected = 0; |
| int prioritised = 0; |
| int prioritised_exist = 0; |
| struct yaffs_block_info *bi; |
| int threshold; |
| |
| /* First let's see if we need to grab a prioritised block */ |
| if (dev->has_pending_prioritised_gc && !aggressive) { |
| dev->gc_dirtiest = 0; |
| bi = dev->block_info; |
| for (i = dev->internal_start_block; |
| i <= dev->internal_end_block && !selected; i++) { |
| |
| if (bi->gc_prioritise) { |
| prioritised_exist = 1; |
| if (bi->block_state == YAFFS_BLOCK_STATE_FULL && |
| yaffs_block_ok_for_gc(dev, bi)) { |
| selected = i; |
| prioritised = 1; |
| } |
| } |
| bi++; |
| } |
| |
| /* |
| * If there is a prioritised block and none was selected then |
| * this happened because there is at least one old dirty block |
| * gumming up the works. Let's gc the oldest dirty block. |
| */ |
| |
| if (prioritised_exist && |
| !selected && dev->oldest_dirty_block > 0) |
| selected = dev->oldest_dirty_block; |
| |
| if (!prioritised_exist) /* None found, so we can clear this */ |
| dev->has_pending_prioritised_gc = 0; |
| } |
| |
| /* If we're doing aggressive GC then we are happy to take a less-dirty |
| * block, and search harder. |
| * Otherwise (leisurely gc) we only bother with a block if it |
| * has only a few pages in use. |
| */ |
| |
| if (!selected) { |
| int pages_used; |
| int n_blocks = |
| dev->internal_end_block - dev->internal_start_block + 1; |
| if (aggressive) { |
| threshold = dev->param.chunks_per_block; |
| iterations = n_blocks; |
| } else { |
| int max_threshold; |
| |
| if (background) |
| max_threshold = dev->param.chunks_per_block / 2; |
| else |
| max_threshold = dev->param.chunks_per_block / 8; |
| |
| if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD) |
| max_threshold = YAFFS_GC_PASSIVE_THRESHOLD; |
| |
| threshold = background ? (dev->gc_not_done + 2) * 2 : 0; |
| if (threshold < YAFFS_GC_PASSIVE_THRESHOLD) |
| threshold = YAFFS_GC_PASSIVE_THRESHOLD; |
| if (threshold > max_threshold) |
| threshold = max_threshold; |
| |
| iterations = n_blocks / 16 + 1; |
| if (iterations > 100) |
| iterations = 100; |
| } |
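| /* |
| * Worked example (illustrative numbers): with chunks_per_block = 64 |
| * and a background pass that has already been refused twice |
| * (gc_not_done == 2), threshold = (2 + 2) * 2 = 8, which lies inside |
| * [YAFFS_GC_PASSIVE_THRESHOLD, 32], so blocks with up to 8 live |
| * pages qualify for collection. |
| */ |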
| |
| for (i = 0; |
| i < iterations && |
| (dev->gc_dirtiest < 1 || |
| dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); |
| i++) { |
| dev->gc_block_finder++; |
| if (dev->gc_block_finder < dev->internal_start_block || |
| dev->gc_block_finder > dev->internal_end_block) |
| dev->gc_block_finder = |
| dev->internal_start_block; |
| |
| bi = yaffs_get_block_info(dev, dev->gc_block_finder); |
| |
| pages_used = bi->pages_in_use - bi->soft_del_pages; |
| |
| if (bi->block_state == YAFFS_BLOCK_STATE_FULL && |
| pages_used < dev->param.chunks_per_block && |
| (dev->gc_dirtiest < 1 || |
| pages_used < dev->gc_pages_in_use) && |
| yaffs_block_ok_for_gc(dev, bi)) { |
| dev->gc_dirtiest = dev->gc_block_finder; |
| dev->gc_pages_in_use = pages_used; |
| } |
| } |
| |
| if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold) |
| selected = dev->gc_dirtiest; |
| } |
| |
| /* |
| * If nothing has been selected for a while, try the oldest dirty |
| * because that's gumming up the works. |
| */ |
| |
| if (!selected && dev->param.is_yaffs2 && |
| dev->gc_not_done >= (background ? 10 : 20)) { |
| yaffs2_find_oldest_dirty_seq(dev); |
| if (dev->oldest_dirty_block > 0) { |
| selected = dev->oldest_dirty_block; |
| dev->gc_dirtiest = selected; |
| dev->oldest_dirty_gc_count++; |
| bi = yaffs_get_block_info(dev, selected); |
| dev->gc_pages_in_use = |
| bi->pages_in_use - bi->soft_del_pages; |
| } else { |
| dev->gc_not_done = 0; |
| } |
| } |
| |
| if (selected) { |
| yaffs_trace(YAFFS_TRACE_GC, |
| "GC Selected block %d with %d free, prioritised:%d", |
| selected, |
| dev->param.chunks_per_block - dev->gc_pages_in_use, |
| prioritised); |
| |
| dev->n_gc_blocks++; |
| if (background) |
| dev->bg_gcs++; |
| |
| dev->gc_dirtiest = 0; |
| dev->gc_pages_in_use = 0; |
| dev->gc_not_done = 0; |
| if (dev->refresh_skip > 0) |
| dev->refresh_skip--; |
| } else { |
| dev->gc_not_done++; |
| yaffs_trace(YAFFS_TRACE_GC, |
| "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s", |
| dev->gc_block_finder, dev->gc_not_done, threshold, |
| dev->gc_dirtiest, dev->gc_pages_in_use, |
| dev->oldest_dirty_block, background ? " bg" : ""); |
| } |
| |
| return selected; |
| } |
| |
| /* New garbage collector |
| * If we're very low on erased blocks then we do aggressive garbage collection, |
| * otherwise we do "leisurely" garbage collection. |
| * Aggressive gc searches further (the whole array) and will accept blocks |
| * with more pages still in use. |
| * Passive gc only inspects smaller areas and only accepts mostly-dirty blocks. |
| * |
| * The idea is to help clear out space in a more spread-out manner. |
| * Whether it really does anything useful is unproven. |
| */ |
| static int yaffs_check_gc(struct yaffs_dev *dev, int background) |
| { |
| int aggressive = 0; |
| int gc_ok = YAFFS_OK; |
| int max_tries = 0; |
| int min_erased; |
| int erased_chunks; |
| int checkpt_block_adjust; |
| |
| if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0) |
| return YAFFS_OK; |
| |
| if (dev->gc_disable) |
| /* Bail out so we don't get recursive gc */ |
| return YAFFS_OK; |
| |
| /* This loop should pass the first time. |
| * We only loop here if garbage collection does not increase free space. |
| */ |
| |
| do { |
| max_tries++; |
| |
| checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev); |
| |
| min_erased = |
| dev->param.n_reserved_blocks + checkpt_block_adjust + 1; |
| erased_chunks = |
| dev->n_erased_blocks * dev->param.chunks_per_block; |
| |
| /* If we need a block soon then do aggressive gc. */ |
| if (dev->n_erased_blocks < min_erased) |
| aggressive = 1; |
| else { |
| if (!background |
| && erased_chunks > (dev->n_free_chunks / 4)) |
| break; |
| |
| if (dev->gc_skip > 20) |
| dev->gc_skip = 20; |
| if (erased_chunks < dev->n_free_chunks / 2 || |
| dev->gc_skip < 1 || background) |
| aggressive = 0; |
| else { |
| dev->gc_skip--; |
| break; |
| } |
| } |
| |
| dev->gc_skip = 5; |
| |
| /* If we don't already have a block being gc'd then see if we |
| * should start another */ |
| |
| if (dev->gc_block < 1 && !aggressive) { |
| dev->gc_block = yaffs2_find_refresh_block(dev); |
| dev->gc_chunk = 0; |
| dev->n_clean_ups = 0; |
| } |
| if (dev->gc_block < 1) { |
| dev->gc_block = |
| yaffs_find_gc_block(dev, aggressive, background); |
| dev->gc_chunk = 0; |
| dev->n_clean_ups = 0; |
| } |
| |
| if (dev->gc_block > 0) { |
| dev->all_gcs++; |
| if (!aggressive) |
| dev->passive_gc_count++; |
| |
| yaffs_trace(YAFFS_TRACE_GC, |
| "yaffs: GC n_erased_blocks %d aggressive %d", |
| dev->n_erased_blocks, aggressive); |
| |
| gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive); |
| } |
| |
| if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) && |
| dev->gc_block > 0) { |
| yaffs_trace(YAFFS_TRACE_GC, |
| "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d", |
| dev->n_erased_blocks, max_tries, |
| dev->gc_block); |
| } |
| } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) && |
| (dev->gc_block > 0) && (max_tries < 2)); |
| |
| return aggressive ? gc_ok : YAFFS_OK; |
| } |
| |
| /* |
| * yaffs_bg_gc() |
| * Garbage collects. Intended to be called from a background thread. |
| * Returns non-zero if at least half the free chunks are erased. |
| */ |
| int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency) |
| { |
| int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block; |
| |
| yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency); |
| |
| yaffs_check_gc(dev, 1); |
| return erased_chunks > dev->n_free_chunks / 2; |
| } |
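| |
| /* |
| * Usage sketch (illustrative, not from the original code): a background |
| * thread might call this periodically and back off once it returns |
| * non-zero, e.g.: |
| * |
| * if (yaffs_bg_gc(dev, urgency)) |
| * interval = LONG_INTERVAL; - plenty of erased space |
| * else |
| * interval = SHORT_INTERVAL; - keep collecting soon |
| * |
| * LONG_INTERVAL and SHORT_INTERVAL are hypothetical scheduling values. |
| */ |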
| |
| /*-------------------- Data file manipulation -----------------*/ |
| |
| static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer) |
| { |
| int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL); |
| |
| if (nand_chunk >= 0) |
| return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk, |
| buffer, NULL); |
| else { |
| yaffs_trace(YAFFS_TRACE_NANDACCESS, |
| "Chunk %d not found zero instead", |
| nand_chunk); |
| /* get sane (zero) data if you read a hole */ |
| memset(buffer, 0, in->my_dev->data_bytes_per_chunk); |
| return 0; |
| } |
| |
| } |
| |
| void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash, |
| int lyn) |
| { |
| int block; |
| int page; |
| struct yaffs_ext_tags tags; |
| struct yaffs_block_info *bi; |
| |
| if (chunk_id <= 0) |
| return; |
| |
| dev->n_deletions++; |
| block = chunk_id / dev->param.chunks_per_block; |
| page = chunk_id % dev->param.chunks_per_block; |
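| |
| /* |
| * Example (illustrative numbers): with chunks_per_block = 64, |
| * chunk_id = 130 maps to block 2, page 2. |
| */ |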
| |
| if (!yaffs_check_chunk_bit(dev, block, page)) |
| yaffs_trace(YAFFS_TRACE_VERIFY, |
| "Deleting invalid chunk %d", chunk_id); |
| |
| bi = yaffs_get_block_info(dev, block); |
| |
| yaffs2_update_oldest_dirty_seq(dev, block, bi); |
| |
| yaffs_trace(YAFFS_TRACE_DELETION, |
| "line %d delete of chunk %d", |
| lyn, chunk_id); |
| |
| if (!dev->param.is_yaffs2 && mark_flash && |
| bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) { |
| |
| memset(&tags, 0, sizeof(tags)); |
| tags.is_deleted = 1; |
| yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags); |
| yaffs_handle_chunk_update(dev, chunk_id, &tags); |
| } else { |
| dev->n_unmarked_deletions++; |
| } |
| |
| /* Pull out of the management area. |
| * If the whole block became dirty, this will kick off an erasure. |
| */ |
| if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING || |
| bi->block_state == YAFFS_BLOCK_STATE_FULL || |
| bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN || |
| bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { |
| dev->n_free_chunks++; |
| yaffs_clear_chunk_bit(dev, block, page); |
| bi->pages_in_use--; |
| |
| if (bi->pages_in_use == 0 && |
| !bi->has_shrink_hdr && |
| bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING && |
| bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) { |
| yaffs_block_became_dirty(dev, block); |
| } |
| } |
| } |
| |
| static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, |
| const u8 *buffer, int n_bytes, int use_reserve) |
| { |
| /* Find the old chunk; we need to do this to get the serial number. |
| * Write the new chunk and patch it into the tree. |
| * Invalidate the old tags. |
| */ |
| |
| int prev_chunk_id; |
| struct yaffs_ext_tags prev_tags; |
| int new_chunk_id; |
| struct yaffs_ext_tags new_tags; |
| struct yaffs_dev *dev = in->my_dev; |
| |
| yaffs_check_gc(dev, 0); |
| |
| /* Get the previous chunk at this location in the file if it exists. |
| * If it does not exist then put a zero into the tree. This creates |
| * the tnode now, rather than later when it is harder to clean up. |
| */ |
| prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags); |
| if (prev_chunk_id < 1 && |
| !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0)) |
| return 0; |
| |
| /* Set up new tags */ |
| memset(&new_tags, 0, sizeof(new_tags)); |
| |
| new_tags.chunk_id = inode_chunk; |
| new_tags.obj_id = in->obj_id; |
| new_tags.serial_number = |
| (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1; |
| new_tags.n_bytes = n_bytes; |
| |
| if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) { |
| yaffs_trace(YAFFS_TRACE_ERROR, |
| "Writing %d bytes to chunk!!!!!!!!!", |
| n_bytes); |
| BUG(); |
| } |
| |
| new_chunk_id = |
| yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve); |
| |
| if (new_chunk_id > 0) { |
| yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0); |
| |
| if (prev_chunk_id > 0) |
| yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); |
| |
| yaffs_verify_file_sane(in); |
| } |
| return new_chunk_id; |
| |
| } |
| |
| |
| |
| static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set, |
| const YCHAR *name, const void *value, int size, |
| int flags) |
| { |
| struct yaffs_xattr_mod xmod; |
| int result; |
| |
| xmod.set = set; |
| xmod.name = name; |
| xmod.data = value; |
| xmod.size = size; |
| xmod.flags = flags; |
| xmod.result = -ENOSPC; |
| |
| result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod); |
| |
| if (result > 0) |
| return xmod.result; |
| else |
| return -ENOSPC; |
| } |
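| |
| /* |
| * The xmod descriptor built above rides through yaffs_update_oh(), which |
| * rewrites the object header and, in the process, calls |
| * yaffs_apply_xattrib_mod() below with the header buffer. The name/value |
| * change therefore lands in the same chunk as the object header itself, |
| * and xmod.result carries the outcome back to the caller. |
| */ |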
| |
| static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer, |
| struct yaffs_xattr_mod *xmod) |
| { |
| int retval = 0; |
| int x_offs = sizeof(struct yaffs_obj_hdr); |
| struct yaffs_dev *dev = obj->my_dev; |
| int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); |
| char *x_buffer = buffer + x_offs; |
| |
| if (xmod->set) |
| retval = |
| nval_set(x_buffer, x_size, xmod->name, xmod->data, |
| xmod->size, xmod->flags); |
| else |
| retval = nval_del(x_buffer, x_size, xmod->name); |
| |
| obj->has_xattr = nval_hasvalues(x_buffer, x_size); |
| obj->xattr_known = 1; |
| xmod->result = retval; |
| |
| return retval; |
| } |
| |
| static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name, |
| void *value, int size) |
| { |
| char *buffer = NULL; |
| int result; |
| struct yaffs_ext_tags tags; |
| struct yaffs_dev *dev = obj->my_dev; |
| int x_offs = sizeof(struct yaffs_obj_hdr); |
| int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); |
| char *x_buffer; |
| int retval = 0; |
| |
| if (obj->hdr_chunk < 1) |
| return -ENODATA; |
| |
| /* If we know that the object has no xattribs then don't do all the |
| * reading and parsing. |
| */ |
| if (obj->xattr_known && !obj->has_xattr) { |
| if (name) |
| return -ENODATA; |
| else |
| return 0; |
| } |
| |
| buffer = (char *)yaffs_get_temp_buffer(dev); |
| if (!buffer) |
| return -ENOMEM; |
| |
| result = |
| yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags); |
| |
| if (result != YAFFS_OK) |
| retval = -ENOENT; |
| else { |
| x_buffer = buffer + x_offs; |
| |
| if (!obj->xattr_known) { |
| obj->has_xattr = nval_hasvalues(x_buffer, x_size); |
| obj->xattr_known = 1; |
| } |
| |
| if (name) |
| retval = nval_get(x_buffer, x_size, name, value, size); |
| else |
| retval = nval_list(x_buffer, x_size, value, size); |
| } |
| yaffs_release_temp_buffer(dev, (u8 *) buffer); |
| return retval; |
| } |
| |
| int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name, |
| const void *value, int size, int flags) |
| { |
| return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags); |
| } |
| |
| int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name) |
| { |
| return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0); |
| } |
| |
| int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value, |
| int size) |
| { |
| return yaffs_do_xattrib_fetch(obj, name, value, size); |
| } |
| |
| int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size) |
| { |
| return yaffs_do_xattrib_fetch(obj, NULL, buffer, size); |
| } |
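| |
| /* |
| * Illustrative use of the xattr wrappers above (the attribute name and |
| * value are made-up examples): |
| * |
| * char val[16]; |
| * |
| * yaffs_set_xattrib(obj, _Y("user.tag"), "red", 3, 0); |
| * yaffs_get_xattrib(obj, _Y("user.tag"), val, sizeof(val)); |
| * yaffs_remove_xattrib(obj, _Y("user.tag")); |
| */ |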
| |
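| /* |
| * Objects found during scanning may be created lazily, with their name and |
| * attributes left on flash. yaffs_check_obj_details_loaded() reads the |
| * object header back in on first use and fills in those details. |
| */ |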
| static void yaffs_check_obj_details_loaded(struct yaffs_obj *in) |
| { |
| u8 *buf; |
| struct yaffs_obj_hdr *oh; |
| struct yaffs_dev *dev; |
| struct yaffs_ext_tags tags; |
| |
| if (!in || !in->lazy_loaded || in->hdr_chunk < 1) |
| return; |
| |
| dev = in->my_dev; |
| in->lazy_loaded = 0; |
| buf = yaffs_get_temp_buffer(dev); |
| |
| yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags); |
| oh = (struct yaffs_obj_hdr *)buf; |
| |
| in->yst_mode = oh->yst_mode; |
| yaffs_load_attribs(in, oh); |
| yaffs_set_obj_name_from_oh(in, oh); |
| |
| if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { |
| in->variant.symlink_variant.alias = |
| yaffs_clone_str(oh->alias); |
| } |
| yaffs_release_temp_buffer(dev, buf); |
| } |
| |
| static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name, |
| const YCHAR *oh_name, int buff_size) |
| { |
| #ifdef CONFIG_YAFFS_AUTO_UNICODE |
| if (dev->param.auto_unicode) { |
| |