| /* |
| * Copyright (c) 2019 The Fuchsia Authors |
| * |
| * SPDX-License-Identifier: BSD-3-Clause |
| */ |
| |
| #include <common.h> |
| #include <amlogic/aml_mtd.h> |
| #include <amlogic/storage.h> |
| #include <blk.h> |
| #include <emmc_partitions.h> |
| #include <mmc.h> |
| #include <part_efi.h> |
| #include <zircon/partition.h> |
| #include <zircon/partition_internal.h> |
| #include <zircon/vboot.h> |
| |
| #define ERASE_GROUP_SIZE (512 * 1024) |
| |
| /* Temporary buffer for when we need to cache data locally. Only valid within |
| a single call; do not expect the contents to persist across calls, since |
| other functions may have modified it. */ |
| static uint8_t temp_buffer[ERASE_GROUP_SIZE] __aligned(ARCH_DMA_MINALIGN); |
| |
| static int store_call_setup(const zircon_partition *part, uint64_t offset, |
| size_t length) |
| { |
| if (offset + length > part->size) { |
| fprintf(stderr, |
| "Error: operation cannot exceed partition boundary\n"); |
| return -1; |
| } |
| |
| if (store_set_device(BOOT_EMMC)) { |
| fprintf(stderr, "Error: Unable to set storage device\n"); |
| return -1; |
| } |
| |
| return 0; |
| } |
| |
| static int gpt_part_read(const zircon_partition *part, uint64_t offset, |
| void *buffer, size_t length) |
| { |
| if (store_call_setup(part, offset, length)) { |
| return -1; |
| } |
| |
| // store_read() offset must be at a block boundary. If our |offset| is |
| // not, read the first partial block manually. |
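| // Example (hypothetical numbers, assuming a 512-byte read block): a |
| // request at offset = 1000, length = 2000 gives |
| //   first_block_offset     = 1000 % 512 = 488 |
| //   first_block_length     = min(2000 + 488, 512) = 512 |
| //   first_block_bytes_used = 512 - 488 = 24 |
| // leaving an aligned read of 1976 bytes starting at offset 1024. |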
| uint64_t first_block_offset = offset % part->data->mmc->read_bl_len; |
| size_t first_block_bytes_used = 0; |
| if (first_block_offset) { |
| // Read from the beginning of the block up to the end of the request, |
| // capped at the end of the block. |
| size_t first_block_length = |
| min(length + first_block_offset, |
| (uint64_t)part->data->mmc->read_bl_len); |
| |
| int ret = store_read(part->data->gpt_name, |
| part->data->gpt_offset + offset - |
| first_block_offset, |
| first_block_length, temp_buffer); |
| if (ret) { |
| fprintf(stderr, "Error: partial page read failure\n"); |
| return ret; |
| } |
| |
| // Track the bytes we just read, but leave them in the temp |
| // buffer for now (see note below about alignment). |
| first_block_bytes_used = |
| first_block_length - first_block_offset; |
| offset += first_block_bytes_used; |
| length -= first_block_bytes_used; |
| |
| // If that was the entire requested amount, we're done. |
| if (length == 0) { |
| memcpy(buffer, temp_buffer + first_block_offset, |
| first_block_bytes_used); |
| return 0; |
| } |
| } |
| |
| // store_read() also requires the destination buffer to meet a minimum |
| // alignment (likely DMA alignment, hence the ARCH_DMA_MINALIGN on |
| // temp_buffer), so we read this next chunk directly into |buffer| |
| // without any offset. |
| int ret = store_read(part->data->gpt_name, |
| part->data->gpt_offset + offset, length, buffer); |
| if (ret != 0) { |
| return ret; |
| } |
| |
| // Finally, if we did a partial read of the first block, shift the main |
| // read forward by that amount and copy the partial first block into the |
| // start of the buffer. |
| if (first_block_bytes_used) { |
| memmove(buffer + first_block_bytes_used, buffer, length); |
| memcpy(buffer, temp_buffer + first_block_offset, |
| first_block_bytes_used); |
| } |
| |
| return 0; |
| } |
| |
| static int gpt_part_write(const zircon_partition *part, uint64_t offset, |
| const void *buffer, size_t length) |
| { |
| if (store_call_setup(part, offset, length)) { |
| return -1; |
| } |
| |
| // store_write() offset must be at a block boundary. If our |offset| is |
| // not, read the first block, modify it, and write it back manually |
| // (read-modify-write). |
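| // Example (hypothetical numbers, assuming a 512-byte write block): a |
| // request at offset = 1000, length = 2000 gives |
| //   first_block_write_offset = 1000 % 512 = 488 |
| //   first_block_offset       = 1000 - 488 = 512 |
| //   first_block_write_length = min(2000, 512 - 488) = 24 |
| // so the block at offset 512 is read, patched at byte 488, written back, |
| // and the remaining 1976 bytes are written from offset 1024 onwards. |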
| // Compute the unaligned write offset within the first block. |
| size_t first_block_write_offset = |
| offset % part->data->mmc->write_bl_len; |
| if (first_block_write_offset) { |
| static uint8_t scratch_buffer[512] __aligned(ARCH_DMA_MINALIGN); |
| assert(sizeof(scratch_buffer) == part->data->mmc->write_bl_len); |
| // The aligned offset of the first block. |
| size_t first_block_offset = offset - first_block_write_offset; |
| |
| // Read the block |
| int res = gpt_part_read(part, first_block_offset, |
| scratch_buffer, sizeof(scratch_buffer)); |
| if (res) { |
| return res; |
| } |
| |
| // The amount of data to write into the first block: the smaller of the |
| // total length and the bytes remaining in the block. |
| size_t first_block_write_length = |
| min(length, |
| sizeof(scratch_buffer) - first_block_write_offset); |
| |
| // Modify |
| memcpy(scratch_buffer + first_block_write_offset, buffer, |
| first_block_write_length); |
| |
| // Write back the modified data. |
| res = gpt_part_write(part, first_block_offset, scratch_buffer, |
| sizeof(scratch_buffer)); |
| if (res) { |
| return res; |
| } |
| |
| offset += first_block_write_length; |
| length -= first_block_write_length; |
| buffer = (const uint8_t *)(buffer) + first_block_write_length; |
| if (length == 0) { |
| return 0; |
| } |
| } |
| |
| return store_write(part->data->gpt_name, |
| part->data->gpt_offset + offset, length, buffer); |
| } |
| |
| static int gpt_part_erase(const zircon_partition *part) |
| { |
| if (store_call_setup(part, 0, 0)) { |
| return -1; |
| } |
| |
| // store_erase() can only erase erase-group-aligned regions. Unaligned |
| // partitions are handled by zero-filling the unaligned head/tail with |
| // store_write() and erasing only the aligned middle. |
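| // Range breakdown (erase_start/erase_end are abs_offset and |
| // abs_offset + size rounded in to the erase-group size): |
| //   [abs_offset .. erase_start)      unaligned head, zero-filled |
| //   [erase_start .. erase_end)       aligned middle, blk_derase() |
| //   [erase_end .. abs_offset + size) unaligned tail, zero-filled |
| // Partitions small enough to fit in temp_buffer are simply zero-filled |
| // with a single store_write(). |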
| |
| size_t min_erase_size = |
| part->data->mmc->erase_grp_size * part->data->mmc->write_bl_len; |
| |
| if (part->data->abs_offset % min_erase_size == 0 && |
| part->size % min_erase_size == 0) { |
| return store_erase(part->data->gpt_name, part->data->gpt_offset, |
| part->size, 0); |
| } |
| |
| if (sizeof(temp_buffer) < min_erase_size) { |
| fprintf(stderr, "Error: erase buffer too small\n"); |
| return -1; |
| } |
| |
| if (part->size <= sizeof(temp_buffer)) { |
| memset(temp_buffer, 0x00, part->size); |
| return store_write(part->data->gpt_name, part->data->gpt_offset, |
| part->size, temp_buffer); |
| } |
| |
| uint64_t erase_start = roundup(part->data->abs_offset, min_erase_size); |
| |
| // 'write' to erase beginning of partition if unaligned |
| if (part->data->abs_offset < erase_start) { |
| uint64_t write_len = erase_start - part->data->abs_offset; |
| |
| memset(temp_buffer, 0x00, write_len); |
| if (store_write(part->data->gpt_name, part->data->gpt_offset, |
| write_len, temp_buffer)) { |
| return -1; |
| } |
| } |
| |
| uint64_t part_end = part->data->abs_offset + part->size; |
| |
| uint64_t erase_end = rounddown(part_end, min_erase_size); |
| |
| // 'write' to erase end of partition if unaligned. |
| if (part_end > erase_end) { |
| uint64_t write_len = part_end - erase_end; |
| |
| uint64_t part_offset = erase_end - part->data->abs_offset; |
| |
| memset(temp_buffer, 0x00, write_len); |
| if (store_write(part->data->gpt_name, |
| part->data->gpt_offset + part_offset, write_len, |
| temp_buffer)) { |
| return -1; |
| } |
| } |
| |
| // If there are any full erase blocks within the partition, erase them. |
| if (erase_start < erase_end) { |
| uint64_t erase_off_lba = |
| erase_start / part->data->blk_desc->blksz; |
| uint64_t erase_len_lba = |
| (erase_end - erase_start) / part->data->blk_desc->blksz; |
| |
| return blk_derase(part->data->blk_desc, erase_off_lba, |
| erase_len_lba); |
| } |
| |
| return 0; |
| } |
| |
| static int bootloader_write(const zircon_partition *part, uint64_t offset, |
| const void *buffer, size_t length) |
| { |
| if (store_call_setup(part, offset, length)) { |
| return -1; |
| } |
| |
| if (offset != 0) { |
| fprintf(stderr, |
| "Error: bootloader can only be written from offset 0\n"); |
| return -1; |
| } |
| |
| return store_boot_write(BOOT_LOADER, BOOT_OPS_ALL, length, buffer); |
| } |
| |
| static int bootloader_erase(const zircon_partition *part) |
| { |
| if (store_call_setup(part, 0, 0)) { |
| return -1; |
| } |
| |
| return store_boot_erase(BOOT_LOADER, BOOT_OPS_ALL); |
| } |
| |
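| /* Switches the block device back to the default user hardware partition. */ |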
| static int reset_hwpart(struct blk_desc *blk_desc) |
| { |
| if (blk_dselect_hwpart(blk_desc, MMC_PART_USER)) { |
| fprintf(stderr, "Error: switch hwpart failed\n"); |
| return -1; |
| } |
| return 0; |
| } |
| |
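| /* Selects hardware partition |hw_part|; on failure, attempts to restore the |
| user hardware partition before returning. */ |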
| static int set_hwpart(struct blk_desc *blk_desc, int hw_part) |
| { |
| if (blk_dselect_hwpart(blk_desc, hw_part)) { |
| fprintf(stderr, "Error: switch hwpart failed\n"); |
| reset_hwpart(blk_desc); |
| return -1; |
| } |
| return 0; |
| } |
| |
| enum raw_emmc_op_type { |
| RAW_EMMC_OP_READ, |
| RAW_EMMC_OP_WRITE, |
| RAW_EMMC_OP_ERASE, |
| }; |
| |
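| /* Erases the block range [offset, offset + size), which is not fully |
| erase-group aligned: the aligned middle (if any) is erased with blk_derase() |
| and the unaligned head and tail are zero-filled with blk_dwrite(). All |
| arguments are in blocks. */ |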
| static int raw_emmc_erase_unaligned(struct blk_desc *blk_desc, lbaint_t offset, |
| lbaint_t size, lbaint_t erase_grp_size) |
| { |
| lbaint_t end = size + offset; |
| lbaint_t left = roundup(offset, erase_grp_size); |
| lbaint_t right = rounddown(end, erase_grp_size); |
| if (right > left) { |
| if (blk_derase(blk_desc, left, right - left)) { |
| fprintf(stderr, "Error: failed to erase blocks\n"); |
| return -1; |
| } |
| } |
| |
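| // segs[0] is the unaligned head [offset, left) and segs[1] the unaligned |
| // tail [right, end), each clamped to the requested range. If offset >= |
| // right, the whole range lies within a single erase group and only |
| // segs[0] is written (the loop below runs once). |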
| lbaint_t segs[][2] = { |
| { offset, min(left, end) }, |
| { max(offset, right), end }, |
| }; |
| assert(sizeof(temp_buffer) == erase_grp_size * blk_desc->blksz); |
| // The segment lengths are in blocks; zero the corresponding number of |
| // bytes. |
| memset(temp_buffer, 0x00, |
| max(segs[0][1] - segs[0][0], segs[1][1] - segs[1][0]) * |
| blk_desc->blksz); |
| |
| for (uint32_t i = 0; i < (offset >= right ? 1 : 2); i++) { |
| lbaint_t to_write = segs[i][1] - segs[i][0]; |
| uint64_t n = |
| blk_dwrite(blk_desc, segs[i][0], to_write, temp_buffer); |
| if (n != to_write) { |
| fprintf(stderr, |
| "Error: attempted to write " LBAFU " blocks, " |
| "wrote %llu blocks\n", |
| to_write, n); |
| return -1; |
| } |
| } |
| return 0; |
| } |
| |
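| /* Performs a read, write, or erase on the raw block device backing |part|. |
| |offset| and |length| are byte values and must be block-aligned for read |
| and write; they are ignored for erase, which always covers the whole |
| partition. The required hardware partition is selected for the operation |
| and the user hardware partition is restored afterwards. */ |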
| static int raw_emmc_op(enum raw_emmc_op_type op, const zircon_partition *part, |
| uint64_t offset, size_t length, void *read_buf, |
| const void *write_buf) |
| { |
| struct blk_desc *blk_desc = part->data->blk_desc; |
| |
| lbaint_t lba_offset = part->data->lba_offset; |
| lbaint_t lba_size = part->data->lba_size; |
| |
| if (op == RAW_EMMC_OP_READ || op == RAW_EMMC_OP_WRITE) { |
| if (offset + length > part->size) { |
| fprintf(stderr, |
| "Error: operation cannot exceed partition boundary\n"); |
| return -1; |
| } |
| |
| if (offset % blk_desc->blksz || length % blk_desc->blksz) { |
| fprintf(stderr, |
| "Error: length/offset must be multiples of block size\n"); |
| return -1; |
| } |
| |
| lba_offset += offset / blk_desc->blksz; |
| lba_size = length / blk_desc->blksz; |
| } |
| |
| if (set_hwpart(blk_desc, part->data->hw_part)) { |
| return -1; |
| } |
| |
| int ret = 0; |
| uint64_t n; |
| |
| switch (op) { |
| case RAW_EMMC_OP_READ: |
| n = blk_dread(blk_desc, lba_offset, lba_size, read_buf); |
| if (n != lba_size) { |
| fprintf(stderr, |
| "Error: attempted to read " LBAFU |
| " blocks, actually read %llu blocks\n", |
| lba_size, n); |
| ret = -1; |
| } |
| break; |
| |
| case RAW_EMMC_OP_WRITE: |
| n = blk_dwrite(blk_desc, lba_offset, lba_size, write_buf); |
| if (n != lba_size) { |
| fprintf(stderr, |
| "Error: attempted to write " LBAFU |
| " blocks, wrote %llu blocks\n", |
| lba_size, n); |
| ret = -1; |
| } |
| break; |
| |
| case RAW_EMMC_OP_ERASE: |
| // blk_derase() silently erases the entire surrounding erase group, so |
| // for an unaligned offset/size fall back to zero-filling the unaligned |
| // parts instead. |
| if (!(lba_offset % part->data->mmc->erase_grp_size || |
| lba_size % part->data->mmc->erase_grp_size)) { |
| if (blk_derase(blk_desc, lba_offset, lba_size)) { |
| fprintf(stderr, |
| "Error: erase of " LBAFU |
| " blocks failed\n", |
| lba_size); |
| ret = -1; |
| } |
| break; |
| } |
| |
| ret = raw_emmc_erase_unaligned(blk_desc, lba_offset, lba_size, |
| part->data->mmc->erase_grp_size); |
| break; |
| } |
| |
| if (reset_hwpart(blk_desc)) { |
| return -1; |
| } |
| |
| return ret; |
| } |
| |
| static int raw_emmc_read(const zircon_partition *part, uint64_t offset, |
| void *buffer, size_t length) |
| { |
| return raw_emmc_op(RAW_EMMC_OP_READ, part, offset, length, buffer, |
| NULL); |
| } |
| |
| static int raw_emmc_write(const zircon_partition *part, uint64_t offset, |
| const void *buffer, size_t length) |
| { |
| return raw_emmc_op(RAW_EMMC_OP_WRITE, part, offset, length, NULL, |
| buffer); |
| } |
| |
| static int raw_emmc_erase(const zircon_partition *part) |
| { |
| return raw_emmc_op(RAW_EMMC_OP_ERASE, part, 0, 0, NULL, NULL); |
| } |
| |
| static int gpt_write(const zircon_partition *part, uint64_t offset, |
| const void *buffer, size_t length) |
| { |
| if (offset != 0) { |
| fprintf(stderr, "Error: Unable to write GPT at offset\n"); |
| return -1; |
| } |
| |
| if (length > part->size) { |
| fprintf(stderr, "Error: write larger than partition size\n"); |
| return -1; |
| } |
| |
| /* write_mbr_and_gpt_partitions() does a fair bit of modification to the GPT |
| buffer, both for validation checks and for conversion to a backup header. |
| |
| While we could probably get away with modifying the caller's data for now, |
| that would break the write() API contract (|buffer| is const) and may cause |
| unexpected problems in the future. Instead, copy the GPT into the temporary |
| buffer and operate on that. */ |
| if (length > sizeof(temp_buffer)) { |
| fprintf(stderr, "Error: GPT larger than temp buffer\n"); |
| return -1; |
| } |
| memcpy(temp_buffer, buffer, length); |
| |
| if (write_mbr_and_gpt_partitions(part->data->blk_desc, temp_buffer)) { |
| fprintf(stderr, "Error: GPT write failed\n"); |
| return -1; |
| } |
| |
| // reinitialize mmc to update the cached partition table. |
| mmc_device_init(part->data->mmc); |
| |
| return 0; |
| } |
| |
| static int gpt_erase(const zircon_partition *part) |
| { |
| if (raw_emmc_erase(part)) { |
| return -1; |
| } |
| |
| zircon_partition *gpt_backup = zircon_get_partition("gpt_backup"); |
| if (!gpt_backup) { |
| fprintf(stderr, "Error: Unable to find gpt_backup\n"); |
| return -1; |
| } |
| |
| if (raw_emmc_erase(gpt_backup)) { |
| zircon_free_partition(gpt_backup); |
| return -1; |
| } |
| |
| zircon_free_partition(gpt_backup); |
| return 0; |
| } |
| |
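| /* Looks up the zircon partition backing sub-partition |sub| and computes how |
| many bytes of an image of length |img_len| apply to it: the smaller of the |
| sub-partition's allowed size and the image data available at its offset. |
| On success the caller owns |*out_part| and must release it with |
| zircon_free_partition(). */ |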
| static int composite_get_part_and_op_size(const subpart_t *sub, size_t img_len, |
| zircon_partition **out_part, |
| uint32_t *op_size) |
| { |
| *out_part = zircon_get_partition(sub->name); |
| if (!(*out_part)) { |
| fprintf(stderr, "Failed to get sub partition %s\n", sub->name); |
| return -1; |
| } |
| // If |img_len| is 0 (e.g. for erase), |*op_size| isn't needed, so skip |
| // the size calculation. |
| if (!img_len) |
| return 0; |
| |
| if (sub->image_offset >= img_len) { |
| fprintf(stderr, |
| "image offset %u for sub partition %s " |
| "exceeds image size %zu\n", |
| sub->image_offset, sub->name, img_len); |
| return -1; |
| } |
| |
| uint32_t available_img_size = (img_len - sub->image_offset); |
| |
| // |sub->image_size| == 0 means we read/write at most up to the |
| // partition size. |
| uint32_t allowed_size = |
| sub->image_size == 0 ? (*out_part)->size : sub->image_size; |
| |
| // Also cap the operation at the amount of image data available. |
| *op_size = min(allowed_size, available_img_size); |
| |
| return 0; |
| } |
| |
| static int composite_part_write(const zircon_partition *part, uint64_t offset, |
| const void *buffer, size_t length) |
| { |
| if (offset) { |
| fprintf(stderr, |
| "composite partition does not support offset.\n"); |
| return -1; |
| } |
| |
| for (uint32_t i = 0; i < part->data->sub_parts_count; i++) { |
| const subpart_t *sub = &part->data->sub_parts[i]; |
| zircon_partition *sub_part = NULL; |
| uint32_t image_size; |
| |
| if (composite_get_part_and_op_size(sub, length, &sub_part, |
| &image_size)) { |
| zircon_free_partition(sub_part); |
| return -1; |
| } |
| |
| int ret = sub_part->write( |
| sub_part, 0, |
| &((const uint8_t *)buffer)[sub->image_offset], |
| image_size); |
| if (ret) { |
| zircon_free_partition(sub_part); |
| return ret; |
| } |
| |
| zircon_free_partition(sub_part); |
| } |
| |
| return 0; |
| } |
| |
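| /* Returns true if the sub-partition at |curr_idx| maps the same image region |
| (same image_offset and last_read_size) as an earlier sub-partition, in |
| which case the data is already in the output buffer and the read can be |
| skipped. */ |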
| static bool composite_part_is_read_redundant(const subpart_t *parts, |
| uint32_t curr_idx) |
| { |
| const subpart_t *curr = &parts[curr_idx]; |
| for (uint32_t i = 0; i < curr_idx; i++) { |
| const subpart_t *prev = &parts[i]; |
| if (curr->image_offset == prev->image_offset && |
| prev->last_read_size == curr->last_read_size) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| static int composite_part_read(const zircon_partition *part, uint64_t offset, |
| void *buffer, size_t length) |
| { |
| if (offset) { |
| fprintf(stderr, |
| "composite partition does not support offset.\n"); |
| return -1; |
| } |
| |
| for (uint32_t i = 0; i < part->data->sub_parts_count; i++) { |
| const subpart_t *sub = &part->data->sub_parts[i]; |
| zircon_partition *sub_part = NULL; |
| |
| /* Have to cast away the const on |last_read_size| so we can update it |
| here. This is OK since it's just internal bookkeeping, and no callers |
| should be relying on it. */ |
| if (composite_get_part_and_op_size(sub, length, &sub_part, |
| (uint32_t *)&sub->last_read_size)) { |
| zircon_free_partition(sub_part); |
| return -1; |
| } |
| |
| if (composite_part_is_read_redundant(part->data->sub_parts, |
| i)) { |
| printf("skip reading %s\n", sub->name); |
| zircon_free_partition(sub_part); |
| continue; |
| } |
| |
| int ret = |
| sub_part->read(sub_part, 0, |
| &((uint8_t *)buffer)[sub->image_offset], |
| sub->last_read_size); |
| if (ret) { |
| zircon_free_partition(sub_part); |
| return ret; |
| } |
| |
| zircon_free_partition(sub_part); |
| } |
| |
| return 0; |
| } |
| |
| static int composite_part_erase(const zircon_partition *part) |
| { |
| for (uint32_t i = 0; i < part->data->sub_parts_count; i++) { |
| const subpart_t *sub = &part->data->sub_parts[i]; |
| zircon_partition *sub_part = NULL; |
| uint32_t image_size; |
| |
| if (composite_get_part_and_op_size(sub, 0, &sub_part, |
| &image_size)) { |
| zircon_free_partition(sub_part); |
| return -1; |
| } |
| |
| int ret = sub_part->erase(sub_part); |
| if (ret) { |
| zircon_free_partition(sub_part); |
| return ret; |
| } |
| |
| zircon_free_partition(sub_part); |
| } |
| |
| return 0; |
| } |
| |
| static bool is_partition_composite(const char *name) |
| { |
| for (int i = 0; i < zircon_partition_map_count; i++) { |
| if (strcmp(name, zircon_partition_map[i].name)) { |
| continue; |
| } |
| return zircon_partition_map[i].type == |
| ZIRCON_PART_TYPE_COMPOSITE; |
| } |
| return false; |
| } |
| |
| static int find_zircon_partition(const char *name, |
| zircon_partition_data_t *out_zircon_part); |
| |
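| /* Computes the size of a composite partition as the maximum over all |
| sub-partitions of image_offset plus the sub-partition size, where |
| image_size == 0 means the size of the underlying partition. Nested |
| composites are rejected, and one sub-partition must start at |
| image_offset 0. */ |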
| static int determine_composite_partition_size(zircon_partition_data_t *part) |
| { |
| // Determine a safe size for reading |
| part->size = 0; |
| bool image_offset_start_from_0 = false; |
| for (uint32_t i = 0; i < part->sub_parts_count; i++) { |
| const subpart_t *sub = &part->sub_parts[i]; |
| if (is_partition_composite(sub->name)) { |
| fprintf(stderr, |
| "Error: Nested composite partition is not allowed: " |
| "%s contains %s\n", |
| part->name, sub->name); |
| return -1; |
| } |
| size_t sub_part_size = sub->image_size; |
| if (sub_part_size == 0) { |
| zircon_partition_data_t sub_part; |
| if (find_zircon_partition(sub->name, &sub_part)) { |
| fprintf(stderr, |
| "Error: Failed to find sub-partition %s\n", |
| sub->name); |
| return -1; |
| } |
| sub_part_size = sub_part.size; |
| } |
| part->size = max((size_t)part->size, sub->image_offset + sub_part_size); |
| image_offset_start_from_0 = |
| image_offset_start_from_0 || (!sub->image_offset); |
| } |
| if (!image_offset_start_from_0) { |
| fprintf(stderr, "There must be a sub-partition with 0 " |
| "image_offset\n"); |
| return -1; |
| } |
| return 0; |
| } |
| |
| /** |
| * find_zircon_partition() - Finds zircon partition. |
| * |
| * @name: Zircon partition name |
| * @out_zircon_part: Outputs zircon partition |
| * |
| * If the partition name exists in the GPT and there does not exist an entry in |
| * the hardcoded partition map with `prioritize_over_gpt` enabled, the GPT |
| * partition is used. |
| * |
| * Otherwise, a `zircon_partition_data_t` is created from the hardcoded translation |
| * layer. |
| * |
| * Return: 0 if OK, non-zero value on error. |
| */ |
| static int find_zircon_partition(const char *name, |
| zircon_partition_data_t *out_zircon_part) |
| { |
| const zircon_partition_data_t *zircon_part = NULL; |
| |
| // The partition name must be able to fit in PART_NAME_LEN, including the |
| // null terminator. |
| if (strlen(name) + 1 > PART_NAME_LEN) { |
| fprintf(stderr, "Error: partition name '%s' is too long\n", |
| name); |
| return -1; |
| } |
| |
| for (int i = 0; i < zircon_partition_map_count; i++) { |
| if (strcmp(name, zircon_partition_map[i].name)) { |
| continue; |
| } |
| zircon_part = &zircon_partition_map[i]; |
| } |
| |
| struct mmc *mmc = find_mmc_device(CONFIG_FASTBOOT_FLASH_MMC_DEV); |
| if (!mmc) { |
| fprintf(stderr, "Error: No MMC device found\n"); |
| return -1; |
| } |
| |
| struct blk_desc *blk_desc = mmc_get_blk_desc(mmc); |
| if (!blk_desc) { |
| fprintf(stderr, "Error: mmc_get_blk_desc failed\n"); |
| return -1; |
| } |
| |
| if (store_set_device(BOOT_EMMC)) { |
| fprintf(stderr, "Error: Unable to set storage device\n"); |
| return -1; |
| } |
| |
| struct partitions *part_info = find_mmc_partition_by_name(name); |
| |
| // Use the GPT partition if `name` exists in the GPT and no hardcoded |
| // entry claims priority over it. |
| if (part_info && |
| (!zircon_part || !zircon_part->prioritize_over_gpt)) { |
| strncpy(out_zircon_part->name, name, PART_NAME_LEN); |
| out_zircon_part->type = ZIRCON_PART_TYPE_GPT_EMMC_USER; |
| out_zircon_part->fastboot_locked_access = |
| (zircon_part ? zircon_part->fastboot_locked_access : 0); |
| strncpy(out_zircon_part->gpt_name, name, PART_NAME_LEN); |
| out_zircon_part->gpt_offset = 0; |
| out_zircon_part->abs_offset = part_info->offset; |
| out_zircon_part->size = part_info->size; |
| out_zircon_part->mmc = mmc; |
| out_zircon_part->blk_desc = blk_desc; |
| |
| return 0; |
| } |
| |
| if (!zircon_part) { |
| return -1; |
| } |
| |
| memcpy(out_zircon_part, zircon_part, sizeof(zircon_partition_data_t)); |
| |
| out_zircon_part->mmc = mmc; |
| out_zircon_part->blk_desc = blk_desc; |
| |
| switch (zircon_part->type) { |
| case ZIRCON_PART_TYPE_GPT_EMMC_USER: { |
| part_info = find_mmc_partition_by_name(zircon_part->gpt_name); |
| |
| if (!part_info) { |
| fprintf(stderr, |
| "Error: unable to find mapped gpt partition: %s, zircon partition: %s\n", |
| zircon_part->gpt_name, zircon_part->name); |
| return -1; |
| } |
| |
| if (zircon_part->gpt_offset + zircon_part->size > |
| part_info->size) { |
| fprintf(stderr, |
| "Error: zircon partition, %s, exceeds its parent gpt partition, %s\n", |
| zircon_part->name, zircon_part->gpt_name); |
| return -1; |
| } |
| |
| if (out_zircon_part->size == 0) { |
| out_zircon_part->size = |
| part_info->size - out_zircon_part->gpt_offset; |
| } |
| out_zircon_part->abs_offset = |
| part_info->offset + zircon_part->gpt_offset; |
| |
| return 0; |
| } |
| case ZIRCON_PART_TYPE_GPT: |
| case ZIRCON_PART_TYPE_BOOTLOADER: |
| case ZIRCON_PART_TYPE_RAW_EMMC: { |
| if (set_hwpart(blk_desc, zircon_part->hw_part)) { |
| return -1; |
| } |
| |
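| // A negative lba_offset in the hardcoded map is relative to the end of |
| // the device (it is converted to an absolute LBA here). |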
| if (zircon_part->lba_offset < 0) { |
| out_zircon_part->lba_offset += blk_desc->lba; |
| } |
| |
| if (out_zircon_part->lba_offset < 0 || |
| out_zircon_part->lba_offset > blk_desc->lba) { |
| fprintf(stderr, "Error: invalid block offset\n"); |
| reset_hwpart(blk_desc); |
| return -1; |
| } |
| |
| if (!zircon_part->lba_size) { |
| out_zircon_part->lba_size = |
| blk_desc->lba - out_zircon_part->lba_offset; |
| } |
| |
| if (out_zircon_part->lba_size > |
| blk_desc->lba - out_zircon_part->lba_offset) { |
| fprintf(stderr, "Error: lba size too large\n"); |
| reset_hwpart(blk_desc); |
| return -1; |
| } |
| |
| out_zircon_part->abs_offset = |
| out_zircon_part->lba_offset * blk_desc->blksz; |
| out_zircon_part->size = |
| out_zircon_part->lba_size * blk_desc->blksz; |
| |
| return reset_hwpart(blk_desc); |
| } |
| case ZIRCON_PART_TYPE_COMPOSITE: |
| return determine_composite_partition_size(out_zircon_part); |
| } |
| |
| return -1; |
| } |
| |
| /* === public API === */ |
| |
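| /* Typical usage (illustrative; the partition name, buf and len below are |
| hypothetical): |
| |
| zircon_partition *part = zircon_get_partition("zircon_a"); |
| if (part && part->read(part, 0, buf, len) == 0) { |
| ... consume buf ... |
| } |
| zircon_free_partition(part); |
| */ |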
| zircon_partition *zircon_get_partition(const char *name) |
| { |
| zircon_partition *part = calloc(1, sizeof(zircon_partition)); |
| zircon_partition_data_t *data = |
| calloc(1, sizeof(zircon_partition_data_t)); |
| |
| if (!part || !data) { |
| fprintf(stderr, "calloc failed\n"); |
| return NULL; |
| } |
| |
| part->data = data; |
| |
| if (find_zircon_partition(name, data)) { |
| fprintf(stderr, "Error: Unable to find partition: %s\n", name); |
| zircon_free_partition(part); |
| return NULL; |
| } |
| |
| part->size = data->size; |
| |
| switch (data->type) { |
| case ZIRCON_PART_TYPE_GPT_EMMC_USER: |
| part->read = gpt_part_read; |
| part->write = gpt_part_write; |
| part->erase = gpt_part_erase; |
| break; |
| |
| case ZIRCON_PART_TYPE_RAW_EMMC: |
| part->read = raw_emmc_read; |
| part->write = raw_emmc_write; |
| part->erase = raw_emmc_erase; |
| break; |
| |
| case ZIRCON_PART_TYPE_BOOTLOADER: |
| part->read = raw_emmc_read; |
| part->write = bootloader_write; |
| part->erase = bootloader_erase; |
| break; |
| |
| case ZIRCON_PART_TYPE_GPT: |
| part->read = raw_emmc_read; |
| part->write = gpt_write; |
| part->erase = gpt_erase; |
| break; |
| |
| case ZIRCON_PART_TYPE_COMPOSITE: |
| part->read = composite_part_read; |
| part->write = composite_part_write; |
| part->erase = composite_part_erase; |
| break; |
| } |
| |
| return part; |
| } |
| |
| zircon_partition *zircon_get_fastboot_partition(const char *name) |
| { |
| bool unlocked; |
| if (zircon_vboot_is_unlocked(&unlocked)) { |
| fprintf(stderr, "Failed to get current lock/unlock state\n"); |
| return NULL; |
| } |
| |
| zircon_partition *part = zircon_get_partition(name); |
| if (!part) { |
| return NULL; |
| } |
| |
| // If this is not a dev build, partitions marked dev-only keep read |
| // access but lose write and erase. |
| #if !defined(DEV_BUILD_CONFIG) |
| if (part->data->fastboot_locked_access & |
| ZIRCON_PARTITION_ACCESS_DEV_ONLY) { |
| part->write = NULL; |
| part->erase = NULL; |
| return part; |
| } |
| #endif |
| |
| if (unlocked) { |
| return part; |
| } |
| |
| if (part->data->fastboot_locked_access & |
| ZIRCON_PARTITION_ACCESS_FLAG_WRITE) { |
| return part; |
| } |
| part->write = NULL; |
| |
| if (part->data->fastboot_locked_access & |
| ZIRCON_PARTITION_ACCESS_FLAG_ERASE) { |
| return part; |
| } |
| part->erase = NULL; |
| |
| return part; |
| } |
| |
| void zircon_free_partition(zircon_partition *part) |
| { |
| if (part) { |
| free(part->data); |
| free(part); |
| } |
| } |