/*
 * Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 * 12/06/98 blf  Created file.
 */
#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mutex.h>
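
/*
 * Map a logical block within a UDF partition to a physical block on the
 * medium. Partition types that need extra translation (virtual, sparable,
 * metadata) are dispatched to the per-type handler installed in the
 * partition map; otherwise the block is simply offset by the partition
 * root.
 */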
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
			uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;

	if (partition >= sbi->s_partitions) {
		udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
			  block, partition, offset);
		return 0xFFFFFFFF;
	}
	map = &sbi->s_partmaps[partition];
	if (map->s_partition_func)
		return map->s_partition_func(sb, block, partition, offset);
	else
		return map->s_partition_root + block + offset;
}
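
/*
 * Translate a virtual block number by looking it up in the Virtual
 * Allocation Table (VAT) and then resolving the result through the
 * partition the VAT inode itself lives on. Used for UDF 1.5 virtual
 * partition maps.
 */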
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock;
	uint32_t index;
	uint32_t loc;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_virtual_data *vdata;
	struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);

	map = &sbi->s_partmaps[partition];
	vdata = &map->s_type_specific.s_virtual;

	if (block > vdata->s_num_entries) {
		udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
			  block, vdata->s_num_entries);
		return 0xFFFFFFFF;
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
			vdata->s_start_offset))[block]);
		goto translate;
	}

	/* Number of VAT entries that fit in the first VAT block */
	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
	if (block >= index) {
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	} else {
		newblock = 0;
		index = vdata->s_start_offset / sizeof(uint32_t) + block;
	}

	loc = udf_block_map(sbi->s_vat_inode, newblock);
	bh = sb_bread(sb, loc);
	if (!bh) {
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
			  sb, block, partition, loc, index);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
	brelse(bh);

translate:
	if (iinfo->i_location.partitionReferenceNum == partition) {
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc,
			      iinfo->i_location.partitionReferenceNum,
			      offset);
}
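
/*
 * UDF 2.0 virtual partitions use the same VAT-based translation as the
 * UDF 1.5 variant, so this simply wraps udf_get_pblock_virt15().
 */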
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}
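
/*
 * Translate a block in a sparable partition. If the packet containing the
 * block has been remapped in the sparing table, return the corresponding
 * block in the spare area; otherwise fall back to the plain partition
 * offset.
 */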
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	uint32_t packet;
	struct udf_sparing_data *sdata;

	map = &sbi->s_partmaps[partition];
	sdata = &map->s_type_specific.s_sparing;
	packet = (block + offset) & ~(sdata->s_packet_len - 1);

	for (i = 0; i < 4; i++) {
		if (sdata->s_spar_map[i] != NULL) {
			st = (struct sparingTable *)
				sdata->s_spar_map[i]->b_data;
			break;
		}
	}

	if (st) {
		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
			struct sparingEntry *entry = &st->mapEntry[i];
			u32 origLoc = le32_to_cpu(entry->origLocation);

			if (origLoc >= 0xFFFFFFF0)
				break;
			else if (origLoc == packet)
				return le32_to_cpu(entry->mappedLocation) +
					((block + offset) &
					 (sdata->s_packet_len - 1));
			else if (origLoc > packet)
				break;
		}
	}

	return map->s_partition_root + block + offset;
}
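
/*
 * Relocate a defective block to the spare area of its sparable partition.
 * The sparing table is searched for an existing mapping of the packet
 * containing old_block; if none exists, a free entry is claimed (moving an
 * unused entry into place so the table stays sorted), all loaded copies of
 * the table are updated on disk, and the new physical location is returned
 * through new_block.
 */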
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;
	struct udf_sb_info *sbi = UDF_SB(sb);
	u16 reallocationTableLen;
	struct buffer_head *bh;
	int ret = 0;

	mutex_lock(&sbi->s_alloc_mutex);
	for (i = 0; i < sbi->s_partitions; i++) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		if (old_block > map->s_partition_root &&
		    old_block < map->s_partition_root + map->s_partition_len) {
			sdata = &map->s_type_specific.s_sparing;
			packet = (old_block - map->s_partition_root) &
						~(sdata->s_packet_len - 1);

			/* Find the first loaded copy of the sparing table */
			for (j = 0; j < 4; j++)
				if (sdata->s_spar_map[j] != NULL) {
					st = (struct sparingTable *)
						sdata->s_spar_map[j]->b_data;
					break;
				}

			if (!st) {
				ret = 1;
				goto out;
			}

			reallocationTableLen =
					le16_to_cpu(st->reallocationTableLen);
			for (k = 0; k < reallocationTableLen; k++) {
				struct sparingEntry *entry = &st->mapEntry[k];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc == 0xFFFFFFFF) {
					/* Free entry: claim it in all copies */
					for (; j < 4; j++) {
						int len;
						bh = sdata->s_spar_map[j];
						if (!bh)
							continue;

						st = (struct sparingTable *)
								bh->b_data;
						entry->origLocation =
							cpu_to_le32(packet);
						len =
						  sizeof(struct sparingTable) +
						  reallocationTableLen *
						  sizeof(struct sparingEntry);
						udf_update_tag((char *)st, len);
						mark_buffer_dirty(bh);
					}
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
							map->s_partition_root) &
						(sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc == packet) {
					/* Packet already remapped */
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
							map->s_partition_root) &
						(sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc > packet)
					break;
			}

			/* Insert a new mapping at position k by reusing the
			 * first free entry further down the table */
			for (l = k; l < reallocationTableLen; l++) {
				struct sparingEntry *entry = &st->mapEntry[l];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc != 0xFFFFFFFF)
					continue;

				for (; j < 4; j++) {
					bh = sdata->s_spar_map[j];
					if (!bh)
						continue;

					st = (struct sparingTable *)bh->b_data;
					mapEntry = st->mapEntry[l];
					mapEntry.origLocation =
							cpu_to_le32(packet);
					memmove(&st->mapEntry[k + 1],
						&st->mapEntry[k],
						(l - k) *
						sizeof(struct sparingEntry));
					st->mapEntry[k] = mapEntry;
					udf_update_tag((char *)st,
						sizeof(struct sparingTable) +
						reallocationTableLen *
						sizeof(struct sparingEntry));
					mark_buffer_dirty(bh);
				}

				*new_block = le32_to_cpu(
					      st->mapEntry[k].mappedLocation) +
					((old_block - map->s_partition_root) &
					 (sdata->s_packet_len - 1));
				ret = 0;
				goto out;
			}

			ret = 1;
			goto out;
		}
	}

	if (i == sbi->s_partitions) {
		/* outside of partitions */
		/* for now, fail =) */
		ret = 1;
	}

out:
	mutex_unlock(&sbi->s_alloc_mutex);
	return ret;
}
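
/*
 * Look a block up through the metadata (or mirror) file and translate the
 * resulting extent location to a physical block. Returns 0xFFFFFFFF if the
 * block is not backed by a recorded extent.
 */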
static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
				  uint16_t partition, uint32_t offset)
{
	struct super_block *sb = inode->i_sb;
	struct udf_part_map *map;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t ext_offset;
	struct extent_position epos = {};
	uint32_t phyblock;

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
						(EXT_RECORDED_ALLOCATED >> 30))
		phyblock = 0xFFFFFFFF;
	else {
		map = &UDF_SB(sb)->s_partmaps[partition];
		/* map to sparable/physical partition desc */
		phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
				map->s_partition_num, ext_offset + offset);
	}

	brelse(epos.bh);
	return phyblock;
}
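
/*
 * Translate a block in a UDF 2.5 metadata partition. The metadata file is
 * tried first; on failure the mirror file entry is loaded (if needed) and
 * used as a fallback.
 */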
uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
				uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	uint32_t retblk;
	struct inode *inode;

	udf_debug("READING from METADATA\n");

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;

	/* We shouldn't mount such media... */
	BUG_ON(!inode);
	retblk = udf_try_read_meta(inode, block, partition, offset);
	if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
		udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
		if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
			mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
				mdata->s_mirror_file_loc,
				map->s_partition_num);
			mdata->s_flags |= MF_MIRROR_FE_LOADED;
		}

		inode = mdata->s_mirror_fe;
		if (!inode)
			return 0xFFFFFFFF;
		retblk = udf_try_read_meta(inode, block, partition, offset);
	}

	return retblk;
}