/*
 * Copyright (c) 2019 The Fuchsia Authors
 */

#include <common.h>

#include <aml_i2c.h>
#include <asm/arch/efuse.h>
#include <asm/arch/secure_apb.h>
#include <libavb/libavb.h>
#include <libavb_atx/libavb_atx.h>
#include <zircon-estelle/partition.h>
#include <zircon-estelle/vboot.h>
#include <zircon-estelle/zircon.h>
/* Chosen to be generous while still requiring a huge number of increase
 * operations before the 64 available fuses are exhausted.
 */
static const uint64_t kRollbackIndexIncreaseThreshold = 1000000000;

/* By convention, when a rollback index is not used, the value remains zero. */
static const uint64_t kRollbackIndexNotUsed = 0;

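/* Per-verification context: records the key versions that libavb_atx reports
 * via set_key_version(), plus the scratch buffer that holds the preloaded ZBI
 * image.
 */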
typedef struct _AvbAtxContext {
	size_t key_version_location[AVB_MAX_NUMBER_OF_ROLLBACK_INDEX_LOCATIONS];
	uint64_t key_version_value[AVB_MAX_NUMBER_OF_ROLLBACK_INDEX_LOCATIONS];
	size_t next_key_version_slot;

	unsigned char *loadaddr;
	size_t loadsize;
} AvbAtxContext;

typedef struct _AvbAtxOpsWithContext {
	AvbAtxOps atx_ops;
	AvbAtxContext context;
} AvbAtxOpsWithContext;

/* Returns context associated with |atx_ops| returned by
 * setup_ops_with_context().
 */
static AvbAtxContext *get_ops_context(AvbAtxOps *atx_ops)
{
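	/* Safe because |atx_ops| is the first member of AvbAtxOpsWithContext,
	 * so a pointer to it is also a pointer to the enclosing struct.
	 */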
	return &((AvbAtxOpsWithContext *)atx_ops)->context;
}

/* Converts a possibly negative offset into an absolute one: a negative offset
 * is interpreted as relative to the end of the partition.
 */
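/* For example, with a 4096-byte partition, calc_offset(4096, -16) returns
 * 4080 and calc_offset(4096, 16) returns 16.
 */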
static inline uint64_t calc_offset(uint64_t size, int64_t offset)
{
	if (offset < 0) {
		return size + offset;
	}
	return offset;
}

static AvbIOResult read_from_partition(AvbOps *ops, const char *partition,
				       int64_t offset, size_t num_bytes,
				       void *buffer, size_t *out_num_read)
{
	int rc;
	uint64_t size, abs_offset;
	printf("avb: read: %s %zu bytes, offset %lld\n", partition, num_bytes,
	       offset);
	rc = zircon_get_partititon_size(partition, &size);
	if (rc) {
		return AVB_IO_RESULT_ERROR_NO_SUCH_PARTITION;
	}
	abs_offset = calc_offset(size, offset);
	if (abs_offset > size) {
		return AVB_IO_RESULT_ERROR_RANGE_OUTSIDE_PARTITION;
	}
	if (abs_offset + num_bytes > size) {
		num_bytes = size - abs_offset;
	}

	rc = zircon_partition_read(partition, abs_offset,
				   (unsigned char *)buffer, num_bytes);
	if (rc) {
		return AVB_IO_RESULT_ERROR_IO;
	}
	*out_num_read = num_bytes;
	return AVB_IO_RESULT_OK;
}

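/* Serves zircon* partitions from the preloaded scratch buffer. For any other
 * partition, |out_pointer| is left NULL, which tells libavb to fall back to
 * read_from_partition().
 */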
static AvbIOResult get_preloaded_partition(AvbOps *ops, const char *partition,
					   size_t num_bytes,
					   uint8_t **out_pointer,
					   size_t *out_num_bytes_preloaded)
{
	*out_pointer = NULL;
	*out_num_bytes_preloaded = 0;

	// ZBI image is preloaded.
	if (!strncmp(partition, ZIRCON_PARTITION_PREFIX,
		     strlen(ZIRCON_PARTITION_PREFIX))) {
		AvbAtxContext *context = get_ops_context(ops->atx_ops);

		if (context->loadsize < num_bytes) {
			printf("error: image is too large to load (%zu > %zu)\n",
			       num_bytes, context->loadsize);
			return AVB_IO_RESULT_ERROR_INSUFFICIENT_SPACE;
		}

		AvbIOResult result =
			read_from_partition(ops, partition, 0, num_bytes,
					    context->loadaddr,
					    out_num_bytes_preloaded);

		// Only set |out_pointer| on success, otherwise AVB will try
		// to free it when cleaning up.
		if (result == AVB_IO_RESULT_OK) {
			*out_pointer = context->loadaddr;
		}
		return result;
	}
	return AVB_IO_RESULT_OK;
}

static AvbIOResult write_to_partition(AvbOps *ops, const char *partition,
				      int64_t offset, size_t num_bytes,
				      const void *buffer)
{
	int rc;
	uint64_t size, abs_offset;
	printf("avb: write: %s %zu bytes, offset %lld\n", partition,
	       num_bytes, offset);
	rc = zircon_get_partititon_size(partition, &size);
	if (rc) {
		return AVB_IO_RESULT_ERROR_NO_SUCH_PARTITION;
	}
	abs_offset = calc_offset(size, offset);
	if (abs_offset + num_bytes > size) {
		return AVB_IO_RESULT_ERROR_RANGE_OUTSIDE_PARTITION;
	}

	rc = zircon_partition_write(partition, abs_offset,
				    (unsigned char *)buffer, num_bytes);
	if (rc) {
		return AVB_IO_RESULT_ERROR_IO;
	}
	return AVB_IO_RESULT_OK;
}

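/* The device is "unlocked" in AVB terms exactly when verified boot is
 * disabled.
 */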
static AvbIOResult read_is_device_unlocked(AvbOps *ops, bool *out_is_unlocked)
{
	*out_is_unlocked = !zircon_is_vboot_enabled();
	return AVB_IO_RESULT_OK;
}

/* avb_slot_verify uses this call to check that a partition exists.
 * Checks for existence but ignores the GUID because it is unused.
 */
static AvbIOResult get_unique_guid_for_partition(AvbOps *ops,
						 const char *partition,
						 char *guid_buf,
						 size_t guid_buf_size)
{
	uint64_t size;
	int rc = zircon_get_partititon_size(partition, &size);
	if (rc) {
		return AVB_IO_RESULT_ERROR_NO_SUCH_PARTITION;
	}

	guid_buf[0] = '\0';
	return AVB_IO_RESULT_OK;
}

static AvbIOResult get_size_of_partition(AvbOps *ops, const char *partition,
					 uint64_t *out_size_num_bytes)
{
	int rc = zircon_get_partititon_size(partition, out_size_num_bytes);
	if (rc) {
		return AVB_IO_RESULT_ERROR_NO_SUCH_PARTITION;
	}
	return AVB_IO_RESULT_OK;
}

/* Estelle doesn't need persistent value support because it doesn't support
 * fastboot and lock/unlock.
 */
static AvbIOResult read_persistent_value(AvbOps *ops, const char *name,
					 size_t buffer_size,
					 uint8_t *out_buffer,
					 size_t *out_num_bytes_read)
{
	printf("WARNING: avb: UNIMPLEMENTED: [%s:L%d]\n", __func__, __LINE__);
	return AVB_IO_RESULT_ERROR_IO;
}

static AvbIOResult write_persistent_value(AvbOps *ops, const char *name,
					  size_t value_size,
					  const uint8_t *value)
{
	printf("WARNING: avb: UNIMPLEMENTED: [%s:L%d]\n", __func__, __LINE__);
	return AVB_IO_RESULT_ERROR_IO;
}

/* Bitmask for determining whether the RNG_USR_DATA register is ready.
 * This mask should be applied to the RNG_USR_STS register:
 * 0: the RNG_USR_DATA register is not ready to be read from.
 * 1: the RNG_USR_DATA register is ready to be read from.
 */
#define USR_RAND32_RDY 0x1

/* Deadline, in microseconds, for waiting for the RNG_USR_STS register to
 * become ready. This is very generous: after a read from the HW RNG, fresh
 * output should be available within HW_RNG_RESEEDING_INTERVAL_MICROS.
 */
#define ENTROPY_COLLECTION_DEADLINE_MICROS 100000

/* HW RNG is reseeded every 40 microseconds. */
#define HW_RNG_RESEEDING_INTERVAL_MICROS 40

/* Fills |output| with |num_bytes| of entropy from the hardware RNG, e.g.
 * when generating an unlock challenge.
 */
static AvbIOResult get_random(AvbAtxOps *atx_ops, size_t num_bytes,
			      uint8_t *output)
{
	uint32_t elapsed_time_us = 0;
	size_t offset = 0;
	size_t i;

	if (!num_bytes || !output) {
		return AVB_IO_RESULT_ERROR_IO;
	}

	if ((num_bytes % sizeof(uint32_t)) != 0) {
		printf("ERROR: num_bytes must be a multiple of 4\n");
		return AVB_IO_RESULT_ERROR_IO;
	}

	for (i = 0; i < num_bytes / sizeof(uint32_t); i++) {
		// Reading a 1 in RNG_USR_STS means that the hw rng has been
		// reseeded. Wait until we see a 1, without exceeding the
		// global deadline.
		while ((readl(P_RNG_USR_STS) & USR_RAND32_RDY) != 1) {
			udelay(1);
			elapsed_time_us++;
			if (elapsed_time_us >
			    ENTROPY_COLLECTION_DEADLINE_MICROS) {
				return AVB_IO_RESULT_ERROR_IO;
			}
		}

		uint32_t rnd = readl(P_RNG_USR_DATA);
		memcpy(output + offset, (void *)&rnd, sizeof(rnd));
		offset += sizeof(rnd);

		// According to the docs, this should guarantee a reseed.
		udelay(HW_RNG_RESEEDING_INTERVAL_MICROS);
	}

	return AVB_IO_RESULT_OK;
}

/* An implementation of AvbAtxOps::set_key_version that saves the key version
 * information to the ops context data.
 */
static void save_key_version_to_context(AvbAtxOps *atx_ops,
					size_t rollback_index_location,
					uint64_t key_version)
{
	AvbAtxContext *context = get_ops_context(atx_ops);
	size_t offset = context->next_key_version_slot++;
	if (offset < AVB_MAX_NUMBER_OF_ROLLBACK_INDEX_LOCATIONS) {
		context->key_version_location[offset] = rollback_index_location;
		context->key_version_value[offset] = key_version;
	} else {
		printf("ERROR: no key version slot left (offset %zu)\n",
		       offset);
	}
}

/* Attaches context data to |existing_ops| and returns new ops. The
 * |ops_with_context| will be used to store the new combined ops and context.
 * The set_key_version function will be replaced in order to collect the key
 * version information in the context.
 */
static AvbAtxOps *setup_ops_with_context(unsigned char *loadaddr,
					 size_t loadsize,
					 const AvbAtxOps *existing_ops,
					 AvbAtxOpsWithContext *ops_with_context)
{
	avb_memset(ops_with_context, 0, sizeof(AvbAtxOpsWithContext));
	ops_with_context->atx_ops = *existing_ops;
	// Close the loop on the circular reference.
	ops_with_context->atx_ops.ops->atx_ops = &ops_with_context->atx_ops;
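	// Note that this mutates the shared |ops| struct, so callbacks that
	// only receive an AvbOps* (e.g. get_preloaded_partition) can recover
	// this context through ops->atx_ops.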
	ops_with_context->atx_ops.set_key_version = save_key_version_to_context;
	ops_with_context->context.loadaddr = loadaddr;
	ops_with_context->context.loadsize = loadsize;
	return &ops_with_context->atx_ops;
}

/* Updates the stored rollback index value for |location| to match |value|. */
static AvbSlotVerifyResult update_rollback_index(AvbOps *ops, size_t location,
						 uint64_t value)
{
	AvbIOResult io_result = AVB_IO_RESULT_OK;
	uint64_t current_value;
	io_result = ops->read_rollback_index(ops, location, &current_value);
	if (io_result == AVB_IO_RESULT_ERROR_OOM) {
		return AVB_SLOT_VERIFY_RESULT_ERROR_OOM;
	} else if (io_result != AVB_IO_RESULT_OK) {
		avb_error("Error getting rollback index for slot.\n");
		return AVB_SLOT_VERIFY_RESULT_ERROR_IO;
	}
	if (current_value == value) {
		// No update necessary.
		return AVB_SLOT_VERIFY_RESULT_OK;
	}
	// This should have been checked during verification, but check again
	// here as a safeguard.
	if (value < current_value) {
		avb_error("Rollback index value cannot decrease.\n");
		return AVB_SLOT_VERIFY_RESULT_ERROR_ROLLBACK_INDEX;
	}
	// The increase must not exceed the threshold. Doing this check second
	// keeps the unsigned subtraction from wrapping when |value| is smaller
	// than |current_value|.
	if (value - current_value > kRollbackIndexIncreaseThreshold) {
		avb_error(
			"Rollback index value cannot increase beyond the threshold.\n");
		return AVB_SLOT_VERIFY_RESULT_ERROR_ROLLBACK_INDEX;
	}
	io_result = ops->write_rollback_index(ops, location, value);
	if (io_result == AVB_IO_RESULT_ERROR_OOM) {
		return AVB_SLOT_VERIFY_RESULT_ERROR_OOM;
	} else if (io_result != AVB_IO_RESULT_OK) {
		avb_error("Error setting stored rollback index.\n");
		return AVB_SLOT_VERIFY_RESULT_ERROR_IO;
	}
	return AVB_SLOT_VERIFY_RESULT_OK;
}

static AvbOps ops;

extern AvbIOResult
avb_read_permanent_attributes(AvbAtxOps *atx_ops,
			      AvbAtxPermanentAttributes *attributes);
AvbIOResult avb_read_rollback_index(AvbOps *ops, size_t rollback_index_location,
				    uint64_t *out_rollback_index);

AvbIOResult avb_write_rollback_index(AvbOps *ops,
				     size_t rollback_index_location,
				     uint64_t rollback_index);

AvbIOResult
avb_read_permanent_attributes_hash(AvbAtxOps *atx_ops,
				   uint8_t hash[AVB_SHA256_DIGEST_SIZE]);

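/* |ops| and |atx_ops| reference each other; the tentative definition of |ops|
 * above lets |atx_ops| take its address before the full initializer below.
 */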
static AvbAtxOps atx_ops = {
	.ops = &ops,
	.read_permanent_attributes = avb_read_permanent_attributes,
	.read_permanent_attributes_hash = avb_read_permanent_attributes_hash,
	.set_key_version = save_key_version_to_context,
	.get_random = get_random,
};

static AvbOps ops = {
	.atx_ops = &atx_ops,
	.read_from_partition = read_from_partition,
	.get_preloaded_partition = get_preloaded_partition,
	.write_to_partition = write_to_partition,
	.validate_vbmeta_public_key = avb_atx_validate_vbmeta_public_key,
	.read_rollback_index = avb_read_rollback_index,
	.write_rollback_index = avb_write_rollback_index,
	.read_is_device_unlocked = read_is_device_unlocked,
	.get_unique_guid_for_partition = get_unique_guid_for_partition,
	.get_size_of_partition = get_size_of_partition,
	.read_persistent_value = read_persistent_value,
	.write_persistent_value = write_persistent_value,
};

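/* Verifies |requested_partitions| for the slot named by |ab_suffix| using
 * libavb/ATX. On a locked device whose slot is marked successful, the stored
 * rollback indexes (including the Fuchsia key-version locations) are then
 * raised to match the verified slot.
 *
 * A sketch of a typical call (the partition names and lock/slot states here
 * are illustrative, not taken from this file):
 *
 *   const char *partitions[] = { "zircon", NULL };
 *   AvbSlotVerifyData *data = NULL;
 *   AvbSlotVerifyResult res = zircon_vboot_slot_verify(
 *           loadaddr, loadsize, partitions, "-a", AVB_ATX_LOCKED,
 *           AVB_ATX_SLOT_MARKED_SUCCESSFUL, &data);
 */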
AvbSlotVerifyResult
zircon_vboot_slot_verify(unsigned char *loadaddr, size_t loadsize,
			 const char *const *requested_partitions,
			 const char *ab_suffix, AvbAtxLockState lock_state,
			 AvbAtxSlotState slot_state,
			 AvbSlotVerifyData **verify_data)
{
	AvbAtxOpsWithContext ops_with_context;
	AvbSlotVerifyResult result = AVB_SLOT_VERIFY_RESULT_OK;
	int i;

	AvbAtxOps *atx_ctx_ops = setup_ops_with_context(
		loadaddr, loadsize, &atx_ops, &ops_with_context);

	result = avb_slot_verify(
		atx_ctx_ops->ops, requested_partitions, ab_suffix,
		(lock_state == AVB_ATX_UNLOCKED) ?
			AVB_SLOT_VERIFY_FLAGS_ALLOW_VERIFICATION_ERROR :
			AVB_SLOT_VERIFY_FLAGS_NONE,
		AVB_HASHTREE_ERROR_MODE_EIO, verify_data);

	if (result != AVB_SLOT_VERIFY_RESULT_OK ||
	    lock_state == AVB_ATX_UNLOCKED) {
		return result;
	}

	/* Increase rollback index values to match the verified slot. */
	if (slot_state == AVB_ATX_SLOT_MARKED_SUCCESSFUL) {
		for (i = 0; i < AVB_MAX_NUMBER_OF_ROLLBACK_INDEX_LOCATIONS;
		     i++) {
			uint64_t rollback_index_value =
				(*verify_data)->rollback_indexes[i];
			if (rollback_index_value != kRollbackIndexNotUsed) {
				result = update_rollback_index(
					atx_ctx_ops->ops, i,
					rollback_index_value);
				if (result != AVB_SLOT_VERIFY_RESULT_OK) {
					avb_slot_verify_data_free(*verify_data);
					*verify_data = NULL;
					return result;
				}
			}
		}

		/* Also increase rollback index values for Fuchsia key version
		 * locations.
		 */
		for (i = 0; i < AVB_MAX_NUMBER_OF_ROLLBACK_INDEX_LOCATIONS;
		     i++) {
			size_t rollback_index_location =
				ops_with_context.context.key_version_location[i];
			uint64_t rollback_index_value =
				ops_with_context.context.key_version_value[i];
			if (rollback_index_value != kRollbackIndexNotUsed) {
				result = update_rollback_index(
					atx_ctx_ops->ops,
					rollback_index_location,
					rollback_index_value);
				if (result != AVB_SLOT_VERIFY_RESULT_OK) {
					avb_slot_verify_data_free(*verify_data);
					*verify_data = NULL;
					return result;
				}
			}
		}
	}

	return result;
}

int zircon_vboot_generate_unlock_challenge(AvbAtxUnlockChallenge *out)
{
	AvbIOResult ret = avb_atx_generate_unlock_challenge(&atx_ops, out);
	if (ret != AVB_IO_RESULT_OK) {
		avb_error("Failed to generate unlock challenge\n");
		return __LINE__;
	}

	return 0;
}

int zircon_vboot_validate_unlock_credential(AvbAtxUnlockCredential *in,
					    bool *out_is_trusted)
{
	AvbIOResult ret = avb_atx_validate_unlock_credential(&atx_ops, in,
							     out_is_trusted);
	if (ret != AVB_IO_RESULT_OK) {
		avb_error("Failed to validate unlock credential\n");
		return __LINE__;
	}

	return 0;
}