diff --git a/README.md b/README.md
index e68b4eb6..ebc0dd6b 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,7 @@ A portable and easy-to-integrate implementation of the Advanced Configuration an
 > [!WARNING]
 > Not yet ready for production use! While the project is mostly feature-complete,
 > it is still under active development. Public API may change, get added or
-> removed without notice. Thread safety is currently lacking, see
-> [#74](/../../issues/74) for more info & progress.
+> removed without notice.
 
 ## Features
 
@@ -27,6 +26,7 @@ A portable and easy-to-integrate implementation of the Advanced Configuration an
 - Client-defined Notify() handlers
 - Firmware global lock management (_GL, locked fields, public API)
 - GAS read/write API
+- Fully thread-safe
 
 ## Why would I use this over ACPICA?
 
diff --git a/include/uacpi/event.h b/include/uacpi/event.h
index a8e4c54f..7d005a99 100644
--- a/include/uacpi/event.h
+++ b/include/uacpi/event.h
@@ -111,7 +111,7 @@ uacpi_status uacpi_install_gpe_handler(
  * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
  */
 UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
-    uacpi_status uacpi_install_gpe_handler_raw(
+uacpi_status uacpi_install_gpe_handler_raw(
     uacpi_namespace_node *gpe_device, uacpi_u16 idx,
     uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx
 ))
@@ -151,7 +151,6 @@ uacpi_status uacpi_disable_gpe_for_wake(
     uacpi_namespace_node *gpe_device, uacpi_u16 idx
 ))
-
 /*
  * Finalize GPE initialization by enabling all GPEs not configured for wake and
  * having a matching AML handler detected.
diff --git a/include/uacpi/internal/event.h b/include/uacpi/internal/event.h
index 0e786f77..40ced0db 100644
--- a/include/uacpi/internal/event.h
+++ b/include/uacpi/internal/event.h
@@ -5,6 +5,10 @@
 // This fixed event is internal-only, and we don't expose it in the enum
 #define UACPI_FIXED_EVENT_GLOBAL_LOCK 0
 
+UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
+    uacpi_status uacpi_initialize_events_early(void)
+)
+
 UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
     uacpi_status uacpi_initialize_events(void)
 )
@@ -12,8 +16,8 @@ UACPI_STUB_IF_REDUCED_HARDWARE(
     void uacpi_deinitialize_events(void)
 )
 
-UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
-    uacpi_status uacpi_events_match_post_dynamic_table_load(void)
+UACPI_STUB_IF_REDUCED_HARDWARE(
+    void uacpi_events_match_post_dynamic_table_load(void)
 )
 
 UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
diff --git a/include/uacpi/internal/namespace.h b/include/uacpi/internal/namespace.h
index f76d88e0..a9cba1cb 100644
--- a/include/uacpi/internal/namespace.h
+++ b/include/uacpi/internal/namespace.h
@@ -112,8 +112,8 @@ uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node);
 uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node);
 uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node);
 
-uacpi_status uacpi_namespace_read_lock();
-uacpi_status uacpi_namespace_read_unlock();
+uacpi_status uacpi_namespace_read_lock(void);
+uacpi_status uacpi_namespace_read_unlock(void);
 
-uacpi_status uacpi_namespace_write_lock();
-uacpi_status uacpi_namespace_write_unlock();
+uacpi_status uacpi_namespace_write_lock(void);
+uacpi_status uacpi_namespace_write_unlock(void);
diff --git a/include/uacpi/internal/registers.h b/include/uacpi/internal/registers.h
index 6ad28e71..7db00613 100644
--- a/include/uacpi/internal/registers.h
+++ b/include/uacpi/internal/registers.h
@@ -2,6 +2,9 @@
 
 #include
 
+uacpi_status uacpi_initialize_registers(void);
+void uacpi_deinitialize_registers(void);
+
 enum uacpi_register {
     UACPI_REGISTER_PM1_STS = 0,
     UACPI_REGISTER_PM1_EN,
diff --git a/include/uacpi/kernel_api.h b/include/uacpi/kernel_api.h
index 54aaaa6e..a8240775 100644
--- a/include/uacpi/kernel_api.h
+++ b/include/uacpi/kernel_api.h
@@ -293,7 +293,11 @@ uacpi_status uacpi_kernel_schedule_work(
 );
 
 /*
- * Blocks until all scheduled work is complete and the work queue becomes empty.
+ * Waits for two types of work to finish:
+ *     1. All in-flight interrupts installed via uacpi_kernel_install_interrupt_handler
+ *     2. All work scheduled via uacpi_kernel_schedule_work
+ *
+ * Note that the waits must be done in this order specifically.
  */
 uacpi_status uacpi_kernel_wait_for_work_completion(void);
 
diff --git a/source/event.c b/source/event.c
index 7f56fd07..d30c31b5 100644
--- a/source/event.c
+++ b/source/event.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #define UACPI_EVENT_DISABLED 0
@@ -14,6 +15,10 @@
 
 #ifndef UACPI_REDUCED_HARDWARE
 
+static uacpi_handle g_gpe_state_slock;
+static struct uacpi_recursive_lock g_event_lock;
+static uacpi_bool g_gpes_finalized;
+
 struct fixed_event {
     uacpi_u8 enable_field;
     uacpi_u8 status_field;
@@ -102,34 +107,61 @@ static uacpi_status set_event(uacpi_u8 event, uacpi_u8 value)
 
 uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event)
 {
-    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
+    uacpi_status ret;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_OK;
 
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
     /*
     * Attempting to enable an event that doesn't have a handler is most likely
     * an error, don't allow it.
     */
-    if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL))
-        return UACPI_STATUS_NO_HANDLER;
+    if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL)) {
+        ret = UACPI_STATUS_NO_HANDLER;
+        goto out;
+    }
+
+    ret = set_event(event, UACPI_EVENT_ENABLED);
-    return set_event(event, UACPI_EVENT_ENABLED);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
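
With the handler check now performed under `g_event_lock`, the supported client pattern is unchanged: install a handler first, then enable the event. A minimal sketch — the handler name and the power-button event are illustrative, and the callback is assumed to follow the usual `uacpi_interrupt_ret (*)(uacpi_handle)` shape:

```c
static uacpi_interrupt_ret on_power_button(uacpi_handle ctx)
{
    (void)ctx;
    /* Defer the actual shutdown to a work item; this runs from the SCI path */
    return UACPI_INTERRUPT_HANDLED;
}

/* Somewhere after uacpi_initialize(): */
uacpi_status st;

st = uacpi_install_fixed_event_handler(
    UACPI_FIXED_EVENT_POWER_BUTTON, on_power_button, UACPI_NULL
);
if (st == UACPI_STATUS_OK)
    st = uacpi_enable_fixed_event(UACPI_FIXED_EVENT_POWER_BUTTON);
```
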
 
 uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event)
 {
-    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
+    uacpi_status ret;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_OK;
 
-    return set_event(event, UACPI_EVENT_DISABLED);
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    ret = set_event(event, UACPI_EVENT_DISABLED);
+
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event)
 {
-    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_OK;
@@ -286,9 +318,9 @@ struct gpe_interrupt_ctx {
     uacpi_handle irq_handle;
     uacpi_u32 irq;
 };
-static struct gpe_interrupt_ctx *gpe_interrupt_head;
+static struct gpe_interrupt_ctx *g_gpe_interrupt_head;
 
-uacpi_u8 gpe_get_mask(struct gp_event *event)
+static uacpi_u8 gpe_get_mask(struct gp_event *event)
 {
     return 1 << (event->idx - event->reg->base_idx);
 }
@@ -305,9 +337,10 @@ static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state)
     struct gpe_register *reg = event->reg;
     uacpi_u64 enable_mask;
     uacpi_u8 event_bit;
+    uacpi_cpu_flags flags;
 
     event_bit = gpe_get_mask(event);
-    if (reg->masked_mask & event_bit)
+    if (state != GPE_STATE_DISABLED && (reg->masked_mask & event_bit))
         return UACPI_STATUS_OK;
 
     if (state == GPE_STATE_ENABLED_CONDITIONALLY) {
@@ -317,9 +350,11 @@ static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state)
         state = GPE_STATE_ENABLED;
     }
 
+    flags = uacpi_kernel_lock_spinlock(g_gpe_state_slock);
+
     ret = uacpi_gas_read(&reg->enable, &enable_mask);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
 
     switch (state) {
     case GPE_STATE_ENABLED:
@@ -329,10 +364,14 @@ static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state)
         enable_mask &= ~event_bit;
         break;
     default:
-        return UACPI_STATUS_INVALID_ARGUMENT;
+        ret = UACPI_STATUS_INVALID_ARGUMENT;
+        goto out;
     }
 
-    return uacpi_gas_write(&reg->enable, enable_mask);
+    ret = uacpi_gas_write(&reg->enable, enable_mask);
+out:
+    uacpi_kernel_unlock_spinlock(g_gpe_state_slock, flags);
+    return ret;
 }
 
 static uacpi_status clear_gpe(struct gp_event *event)
@@ -598,7 +637,7 @@ static uacpi_status find_or_create_gpe_interrupt_ctx(
 )
 {
     uacpi_status ret;
-    struct gpe_interrupt_ctx *entry = gpe_interrupt_head;
+    struct gpe_interrupt_ctx *entry = g_gpe_interrupt_head;
 
     while (entry) {
         if (entry->irq == irq) {
@@ -628,8 +667,8 @@ static uacpi_status find_or_create_gpe_interrupt_ctx(
     }
 
     entry->irq = irq;
-    entry->next = gpe_interrupt_head;
-    gpe_interrupt_head = entry;
+    entry->next = g_gpe_interrupt_head;
+    g_gpe_interrupt_head = entry;
 
     *out_ctx = entry;
     return UACPI_STATUS_OK;
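
The spinlock introduced around the read-modify-write in `set_gpe_state()` above is what makes it safe against the SCI handler and other threads: without it, two concurrent updates to the same GPE enable register can lose each other's bits. An illustrative interleaving (register values hypothetical):

```c
/*
 * CPU0: uacpi_gas_read(&reg->enable)  -> 0x00
 * CPU1: uacpi_gas_read(&reg->enable)  -> 0x00
 * CPU0: uacpi_gas_write(..., 0x01)    -- enables GPE 0
 * CPU1: uacpi_gas_write(..., 0x02)    -- enables GPE 1, silently disabling GPE 0
 *
 * With g_gpe_state_slock held across the read and the write, the second
 * updater observes 0x01 and writes 0x03 instead.
 */
```
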
@@ -649,8 +688,97 @@ static void gpe_release_implicit_notify_handlers(struct gp_event *event)
     event->implicit_handler = UACPI_NULL;
 }
 
+enum gpe_block_action
+{
+    GPE_BLOCK_ACTION_DISABLE_ALL,
+    GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
+    GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
+    GPE_BLOCK_ACTION_CLEAR_ALL,
+};
+
+static uacpi_status gpe_block_apply_action(
+    struct gpe_block *block, enum gpe_block_action action
+)
+{
+    uacpi_status ret;
+    uacpi_size i;
+    uacpi_u8 value;
+    struct gpe_register *reg;
+
+    for (i = 0; i < block->num_registers; ++i) {
+        reg = &block->registers[i];
+
+        switch (action) {
+        case GPE_BLOCK_ACTION_DISABLE_ALL:
+            value = 0;
+            break;
+        case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME:
+            value = reg->runtime_mask & ~reg->masked_mask;
+            break;
+        case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE:
+            value = reg->wake_mask;
+            break;
+        case GPE_BLOCK_ACTION_CLEAR_ALL:
+            ret = uacpi_gas_write(&reg->status, 0xFF);
+            if (uacpi_unlikely_error(ret))
+                return ret;
+            continue;
+        default:
+            return UACPI_STATUS_INVALID_ARGUMENT;
+        }
+
+        reg->current_mask = value;
+        ret = uacpi_gas_write(&reg->enable, value);
+        if (uacpi_unlikely_error(ret))
+            return ret;
+    }
+
+    return UACPI_STATUS_OK;
+}
+
+static void gpe_block_mask_safe(struct gpe_block *block)
+{
+    uacpi_size i;
+    struct gpe_register *reg;
+
+    for (i = 0; i < block->num_registers; ++i) {
+        reg = &block->registers[i];
+
+        // No need to flush or do anything if it's not currently enabled
+        if (!reg->current_mask)
+            continue;
+
+        // 1. Mask the GPEs; this makes sure their state is no longer modifiable
+        reg->masked_mask = 0xFF;
+
+        /*
+         * 2. Wait for in-flight work & IRQs to finish, these might already
+         *    be past the respective "if (masked)" check and therefore may
+         *    try to re-enable a masked GPE.
+         */
+        uacpi_kernel_wait_for_work_completion();
+
+        /*
+         * 3. Now that this GPE's state is unmodifiable and we know that
+         *    currently in-flight IRQs will see the masked state, we can
+         *    safely disable all events knowing they won't be re-enabled by
+         *    a racing IRQ.
+         */
+        uacpi_gas_write(&reg->enable, 0x00);
+
+        /*
+         * 4. Wait for the last possible IRQ to finish, now that this event is
+         *    disabled.
+         */
+        uacpi_kernel_wait_for_work_completion();
+    }
+}
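
This masking sequence only works if the host honors the ordering contract from the kernel_api.h hunk earlier: drain in-flight interrupt handlers first, then the work queue, since an interrupt handler may still be about to queue more work. A minimal host-side sketch under that assumption — `g_irqs_in_flight`, `arch_pause()` and `work_queue_flush()` are hypothetical host primitives:

```c
#include <stdatomic.h>

static atomic_uint g_irqs_in_flight; /* adjusted around each SCI dispatch */

uacpi_status uacpi_kernel_wait_for_work_completion(void)
{
    /* 1. In-flight interrupt handlers may still schedule more work */
    while (atomic_load(&g_irqs_in_flight) != 0)
        arch_pause();

    /* 2. Only now can the work queue drain to a stable "empty" */
    work_queue_flush(); /* hypothetical: blocks until the queue is empty */

    return UACPI_STATUS_OK;
}
```
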
 
 static void uninstall_gpe_block(struct gpe_block *block)
 {
+    if (block->registers != UACPI_NULL)
+        gpe_block_mask_safe(block);
+
     if (block->prev)
         block->prev->next = block->next;
 
@@ -689,18 +817,6 @@ static void uninstall_gpe_block(struct gpe_block *block)
         }
     }
 
-    if (block->registers != UACPI_NULL) {
-        uacpi_size i;
-        struct gpe_register *reg;
-
-        for (i = 0; i < block->num_registers; ++i) {
-            reg = &block->registers[i];
-
-            if (reg->current_mask)
-                uacpi_gas_write(&reg->enable, 0x00);
-        }
-    }
-
     if (block->events != UACPI_NULL) {
         uacpi_size i;
         struct gp_event *event;
@@ -837,13 +953,18 @@ static uacpi_iteration_decision do_match_gpe_methods(
     return UACPI_ITERATION_DECISION_CONTINUE;
 }
 
-uacpi_status uacpi_events_match_post_dynamic_table_load(void)
+void uacpi_events_match_post_dynamic_table_load(void)
 {
     struct gpe_match_ctx match_ctx = {
         .post_dynamic_table_load = UACPI_TRUE,
     };
-    struct gpe_interrupt_ctx *irq_ctx = gpe_interrupt_head;
 
+    uacpi_namespace_write_unlock();
+
+    if (uacpi_unlikely_error(uacpi_recursive_lock_acquire(&g_event_lock)))
+        goto out;
+
+    struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head;
     while (irq_ctx) {
         match_ctx.block = irq_ctx->gpe_head;
@@ -852,7 +973,7 @@ void uacpi_events_match_post_dynamic_table_load(void)
             uacpi_namespace_do_for_each_child(
                 match_ctx.block->device_node, do_match_gpe_methods, UACPI_NULL,
                 UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY,
-                UACPI_SHOULD_LOCK_NO, UACPI_PERMANENT_ONLY_YES, &match_ctx
+                UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx
             );
             match_ctx.block = match_ctx.block->next;
         }
@@ -865,7 +986,9 @@ void uacpi_events_match_post_dynamic_table_load(void)
                    match_ctx.matched_count);
     }
 
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    uacpi_namespace_write_lock();
 }
 
 static uacpi_status create_gpe_block(
@@ -970,7 +1093,7 @@ static void for_each_gpe_block(
 )
 {
     uacpi_iteration_decision decision;
-    struct gpe_interrupt_ctx *irq_ctx = gpe_interrupt_head;
+    struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head;
     struct gpe_block *block;
 
     while (irq_ctx) {
@@ -1025,6 +1148,23 @@ static struct gp_event *get_gpe(
     return ctx.out_event;
 }
 
+static void gp_event_toggle_masks(struct gp_event *event, uacpi_bool set_on)
+{
+    uacpi_u8 this_mask;
+    struct gpe_register *reg = event->reg;
+
+    this_mask = gpe_get_mask(event);
+
+    if (set_on) {
+        reg->runtime_mask |= this_mask;
+        reg->current_mask = reg->runtime_mask;
+        return;
+    }
+
+    reg->runtime_mask &= ~this_mask;
+    reg->current_mask = reg->runtime_mask;
+}
+
 static uacpi_status gpe_remove_user(struct gp_event *event)
 {
     uacpi_status ret = UACPI_STATUS_OK;
@@ -1033,12 +1173,13 @@ static uacpi_status gpe_remove_user(struct gp_event *event)
         return UACPI_STATUS_INVALID_ARGUMENT;
 
     if (--event->num_users == 0) {
-        event->reg->runtime_mask &= ~gpe_get_mask(event);
-        event->reg->current_mask = event->reg->runtime_mask;
+        gp_event_toggle_masks(event, UACPI_FALSE);
 
         ret = set_gpe_state(event, GPE_STATE_DISABLED);
-        if (uacpi_unlikely_error(ret))
+        if (uacpi_unlikely_error(ret)) {
+            gp_event_toggle_masks(event, UACPI_TRUE);
             event->num_users++;
+        }
     }
 
     return ret;
@@ -1062,12 +1203,13 @@ static uacpi_status gpe_add_user(
         if (clear_if_first == EVENT_CLEAR_IF_FIRST_YES)
             clear_gpe(event);
 
-        event->reg->runtime_mask |= gpe_get_mask(event);
-        event->reg->current_mask = event->reg->runtime_mask;
+        gp_event_toggle_masks(event, UACPI_TRUE);
 
         ret = set_gpe_state(event, GPE_STATE_ENABLED);
-        if (uacpi_unlikely_error(ret))
+        if (uacpi_unlikely_error(ret)) {
+            gp_event_toggle_masks(event, UACPI_FALSE);
             event->num_users--;
+        }
     }
 
     return ret;
@@ -1087,92 +1229,75 @@ const uacpi_char *uacpi_gpe_triggering_to_string(
     }
 }
 
-static uacpi_status do_install_gpe_handler(
-    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
-    uacpi_gpe_triggering triggering, enum gpe_handler_type type,
-    uacpi_gpe_handler handler, uacpi_handle ctx
-)
+static uacpi_bool gpe_needs_polling(struct gp_event *event)
 {
-    struct gp_event *event;
-    struct gpe_native_handler *native_handler;
+    return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE;
+}
 
-    if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX))
-        return UACPI_STATUS_INVALID_ARGUMENT;
+static uacpi_status gpe_mask_unmask(
+    struct gp_event *event, uacpi_bool should_mask
+)
+{
+    struct gpe_register *reg;
+    uacpi_u8 mask;
 
-    if (gpe_device == UACPI_NULL) {
-        gpe_device = uacpi_namespace_get_predefined(
-            UACPI_PREDEFINED_NAMESPACE_GPE
-        );
-    }
+    reg = event->reg;
+    mask = gpe_get_mask(event);
 
-    event = get_gpe(gpe_device, idx);
-    if (uacpi_unlikely(event == UACPI_NULL))
-        return UACPI_STATUS_NOT_FOUND;
+    if (should_mask) {
+        if (reg->masked_mask & mask)
+            return UACPI_STATUS_INVALID_ARGUMENT;
 
-    if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER ||
-        event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW)
-        return UACPI_STATUS_ALREADY_EXISTS;
+        // 1. Mask the GPE; this makes sure its state is no longer modifiable
+        reg->masked_mask |= mask;
 
-    native_handler = uacpi_kernel_alloc(sizeof(*native_handler));
-    if (uacpi_unlikely(native_handler == UACPI_NULL))
-        return UACPI_STATUS_OUT_OF_MEMORY;
+        /*
+         * 2. Wait for in-flight work & IRQs to finish, these might already
+         *    be past the respective "if (masked)" check and therefore may
+         *    try to re-enable a masked GPE.
+         */
+        uacpi_kernel_wait_for_work_completion();
 
-    native_handler->cb = handler;
-    native_handler->ctx = ctx;
-    native_handler->previous_handler = event->any_handler;
-    native_handler->previous_handler_type = event->handler_type;
-    native_handler->previous_triggering = event->triggering;
-    native_handler->previously_enabled = UACPI_FALSE;
+        /*
+         * 3. Now that this GPE's state is unmodifiable and we know that currently
+         *    in-flight IRQs will see the masked state, we can safely disable this
+         *    event knowing it won't be re-enabled by a racing IRQ.
+         */
+        set_gpe_state(event, GPE_STATE_DISABLED);
 
-    if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
-        event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
-        event->num_users != 0) {
-        native_handler->previously_enabled = UACPI_TRUE;
-        gpe_remove_user(event);
+        /*
+         * 4. Wait for the last possible IRQ to finish, now that this event is
+         *    disabled.
+         */
+        uacpi_kernel_wait_for_work_completion();
 
-        if (uacpi_unlikely(event->triggering != triggering)) {
-            uacpi_warn(
-                "GPE(%02X) user handler claims %s triggering, originally "
-                "configured as %s\n", idx,
-                uacpi_gpe_triggering_to_string(triggering),
-                uacpi_gpe_triggering_to_string(event->triggering)
-            );
-        }
+        return UACPI_STATUS_OK;
     }
 
-    event->native_handler = native_handler;
-    event->handler_type = type;
-    event->triggering = triggering;
+    if (!(reg->masked_mask & mask))
+        return UACPI_STATUS_INVALID_ARGUMENT;
 
-    return UACPI_STATUS_OK;
-}
+    reg->masked_mask &= ~mask;
+    if (!event->block_interrupts && event->num_users)
+        set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
 
-uacpi_status uacpi_install_gpe_handler(
-    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
-    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
-    uacpi_handle ctx
-)
-{
-    return do_install_gpe_handler(
-        gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER,
-        handler, ctx
-    );
-}
+    return UACPI_STATUS_OK;
+}
 
-uacpi_status uacpi_install_gpe_handler_raw(
-    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
-    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
-    uacpi_handle ctx
-)
-{
-    return do_install_gpe_handler(
-        gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW,
-        handler, ctx
-    );
+    return UACPI_STATUS_OK;
 }
 
-static uacpi_bool gpe_needs_polling(struct gp_event *event)
+/*
+ * Safely mask the event before we modify its handlers.
+ *
+ * This makes sure we can't get an IRQ in the middle of modifying this
+ * event's structures.
+ */
+static uacpi_bool gpe_mask_safe(struct gp_event *event)
 {
-    return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE;
+    // No need to flush or do anything if it's not currently enabled
+    if (!(event->reg->current_mask & gpe_get_mask(event)))
+        return UACPI_FALSE;
+
+    gpe_mask_unmask(event, UACPI_TRUE);
+    return UACPI_TRUE;
 }
 
 static uacpi_iteration_decision do_initialize_gpe_block(
@@ -1216,18 +1341,27 @@ uacpi_status uacpi_finalize_gpe_initialization(void)
 {
-    static uacpi_bool gpes_finalized = UACPI_FALSE;
+    uacpi_status ret;
     uacpi_bool poll_blocks = UACPI_FALSE;
 
-    if (gpes_finalized)
-        return UACPI_STATUS_OK;
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    if (g_gpes_finalized)
+        goto out;
+
+    g_gpes_finalized = UACPI_TRUE;
 
     for_each_gpe_block(do_initialize_gpe_block, &poll_blocks);
     if (poll_blocks)
-        detect_gpes(gpe_interrupt_head->gpe_head);
+        detect_gpes(g_gpe_interrupt_head->gpe_head);
 
-    gpes_finalized = UACPI_TRUE;
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 static uacpi_status sanitize_device_and_find_gpe(
@@ -1248,44 +1382,157 @@ static uacpi_status sanitize_device_and_find_gpe(
     return UACPI_STATUS_OK;
 }
 
-uacpi_status uacpi_uninstall_gpe_handler(
+static uacpi_status do_install_gpe_handler(
     uacpi_namespace_node *gpe_device, uacpi_u16 idx,
-    uacpi_gpe_handler handler
+    uacpi_gpe_triggering triggering, enum gpe_handler_type type,
+    uacpi_gpe_handler handler, uacpi_handle ctx
 )
 {
     uacpi_status ret;
     struct gp_event *event;
     struct gpe_native_handler *native_handler;
+    uacpi_bool did_mask;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
-    if (uacpi_unlikely_error(ret))
-        return ret;
-
-    if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER &&
-        event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW)
-        return UACPI_STATUS_NOT_FOUND;
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
 
-    native_handler = event->native_handler;
-    if (uacpi_unlikely(native_handler->cb != handler))
+    if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
 
-    event->aml_handler = native_handler->previous_handler;
-    event->triggering = native_handler->previous_triggering;
-    event->handler_type = native_handler->previous_handler_type;
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
 
-    if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
-        event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
-        native_handler->previously_enabled) {
-        gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO);
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
 
-        if (gpe_needs_polling(event))
-            maybe_dispatch_gpe(gpe_device, event);
+    if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER ||
+        event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
+        ret = UACPI_STATUS_ALREADY_EXISTS;
+        goto out;
     }
 
-    uacpi_kernel_wait_for_work_completion();
-    uacpi_free(native_handler, sizeof(*native_handler));
-    return UACPI_STATUS_OK;
-}
+    native_handler = uacpi_kernel_alloc(sizeof(*native_handler));
+    if (uacpi_unlikely(native_handler == UACPI_NULL)) {
+        ret = UACPI_STATUS_OUT_OF_MEMORY;
+        goto out;
+    }
+
+    native_handler->cb = handler;
+    native_handler->ctx = ctx;
+    native_handler->previous_handler = event->any_handler;
+    native_handler->previous_handler_type = event->handler_type;
+    native_handler->previous_triggering = event->triggering;
+    native_handler->previously_enabled = UACPI_FALSE;
+
+    did_mask = gpe_mask_safe(event);
+
+    if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
+        event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
+        event->num_users != 0) {
+        native_handler->previously_enabled = UACPI_TRUE;
+        gpe_remove_user(event);
+
+        if (uacpi_unlikely(event->triggering != triggering)) {
+            uacpi_warn(
+                "GPE(%02X) user handler claims %s triggering, originally "
+                "configured as %s\n", idx,
+                uacpi_gpe_triggering_to_string(triggering),
+                uacpi_gpe_triggering_to_string(event->triggering)
+            );
+        }
+    }
+
+    event->native_handler = native_handler;
+    event->handler_type = type;
+    event->triggering = triggering;
+
+    if (did_mask)
+        gpe_mask_unmask(event, UACPI_FALSE);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
+}
+
+uacpi_status uacpi_install_gpe_handler(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
+    uacpi_handle ctx
+)
+{
+    return do_install_gpe_handler(
+        gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER,
+        handler, ctx
+    );
+}
+
+uacpi_status uacpi_install_gpe_handler_raw(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
+    uacpi_handle ctx
+)
+{
+    return do_install_gpe_handler(
+        gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW,
+        handler, ctx
+    );
+}
+
+uacpi_status uacpi_uninstall_gpe_handler(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+    uacpi_gpe_handler handler
+)
+{
+    uacpi_status ret;
+    struct gp_event *event;
+    struct gpe_native_handler *native_handler;
+    uacpi_bool did_mask;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
+    if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER &&
+        event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
+        ret = UACPI_STATUS_NOT_FOUND;
+        goto out;
+    }
+
+    native_handler = event->native_handler;
+    if (uacpi_unlikely(native_handler->cb != handler)) {
+        ret = UACPI_STATUS_INVALID_ARGUMENT;
+        goto out;
+    }
+
+    did_mask = gpe_mask_safe(event);
+
+    event->aml_handler = native_handler->previous_handler;
+    event->triggering = native_handler->previous_triggering;
+    event->handler_type = native_handler->previous_handler_type;
+
+    if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
+        event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
+        native_handler->previously_enabled) {
+        gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO);
+    }
+
+    uacpi_free(native_handler, sizeof(*native_handler));
+
+    if (did_mask)
+        gpe_mask_unmask(event, UACPI_FALSE);
+
+    if (gpe_needs_polling(event))
+        maybe_dispatch_gpe(gpe_device, event);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
+}
 
 uacpi_status uacpi_enable_gpe(
     uacpi_namespace_node *gpe_device, uacpi_u16 idx
@@ -1294,21 +1541,31 @@ uacpi_status uacpi_enable_gpe(
     uacpi_status ret;
     struct gp_event *event;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
-    if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE))
-        return UACPI_STATUS_NO_HANDLER;
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
+    if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE)) {
+        ret = UACPI_STATUS_NO_HANDLER;
+        goto out;
+    }
 
     ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_YES);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
 
     if (gpe_needs_polling(event))
         maybe_dispatch_gpe(gpe_device, event);
 
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_disable_gpe(
@@ -1318,11 +1575,20 @@
     uacpi_status ret;
     struct gp_event *event;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
-    return gpe_remove_user(event);
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
+    ret = gpe_remove_user(event);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_clear_gpe(
@@ -1332,13 +1598,21 @@
     uacpi_status ret;
     struct gp_event *event;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
-    return clear_gpe(event);
-}
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
 
+    ret = clear_gpe(event);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
+}
 
 static uacpi_status gpe_suspend_resume(
     uacpi_namespace_node *gpe_device, uacpi_u16 idx, enum gpe_state state
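
All of the public GPE entry points now follow the same shape: take `g_event_lock`, resolve the event, do the work, release. Client code is unaffected; the canonical sequence, matching the new test in test_runner.cpp at the end of this diff (handler name and GPE index are illustrative):

```c
static uacpi_interrupt_ret handle_my_gpe(
    uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    (void)ctx; (void)gpe_device; (void)idx;
    /* Ask uacpi to clear & re-enable the GPE once we return */
    return UACPI_INTERRUPT_HANDLED | UACPI_GPE_REENABLE;
}

/* A NULL gpe_device means the GPE lives under \_GPE */
uacpi_install_gpe_handler(
    UACPI_NULL, 123, UACPI_GPE_TRIGGERING_EDGE, handle_my_gpe, UACPI_NULL
);
uacpi_enable_gpe(UACPI_NULL, 123);  /* reference-counted */
/* ... */
uacpi_disable_gpe(UACPI_NULL, 123);
uacpi_uninstall_gpe_handler(UACPI_NULL, 123, handle_my_gpe);
```
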
@@ -1347,12 +1621,21 @@ static uacpi_status gpe_suspend_resume(
     uacpi_status ret;
     struct gp_event *event;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
     event->block_interrupts = state == GPE_STATE_DISABLED;
-    return set_gpe_state(event, state);
+    ret = set_gpe_state(event, state);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_suspend_gpe(
@@ -1376,15 +1659,27 @@ uacpi_status uacpi_finish_handling_gpe(
     uacpi_status ret;
     struct gp_event *event;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
     event = get_gpe(gpe_device, idx);
-    if (uacpi_unlikely(event == UACPI_NULL))
-        return UACPI_STATUS_NOT_FOUND;
+    if (uacpi_unlikely(event == UACPI_NULL)) {
+        ret = UACPI_STATUS_NOT_FOUND;
+        goto out;
+    }
+
+    ret = restore_gpe(event);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 
-    return restore_gpe(event);
 }
 
 static uacpi_status gpe_get_mask_unmask(
@@ -1393,32 +1688,22 @@
 {
     uacpi_status ret;
     struct gp_event *event;
-    struct gpe_register *reg;
-    uacpi_u8 mask;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
-    reg = event->reg;
-    mask = gpe_get_mask(event);
-
-    if (should_mask) {
-        if (reg->masked_mask & mask)
-            return UACPI_STATUS_INVALID_ARGUMENT;
-
-        set_gpe_state(event, GPE_STATE_DISABLED);
-        reg->masked_mask |= mask;
-        return UACPI_STATUS_OK;
-    }
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
 
-    if (!(reg->masked_mask & mask))
-        return UACPI_STATUS_INVALID_ARGUMENT;
+    ret = gpe_mask_unmask(event, should_mask);
 
-    reg->masked_mask &= ~mask;
-    if (!event->block_interrupts && event->num_users)
-        set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_mask_gpe(
@@ -1442,19 +1727,34 @@ uacpi_status uacpi_setup_gpe_for_wake(
 {
     uacpi_status ret;
     struct gp_event *event;
+    uacpi_bool did_mask;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
-    if (uacpi_unlikely_error(ret))
-        return ret;
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
 
     if (wake_device != UACPI_NULL) {
-        uacpi_object *obj;
+        uacpi_bool is_dev = wake_device == uacpi_namespace_root();
+
+        if (!is_dev) {
+            ret = uacpi_namespace_node_is(wake_device, UACPI_OBJECT_DEVICE, &is_dev);
+            if (uacpi_unlikely_error(ret))
+                return ret;
+        }
 
-        obj = uacpi_namespace_node_get_object(wake_device);
-        if (wake_device != uacpi_namespace_root() &&
-            obj->type != UACPI_OBJECT_DEVICE)
+        if (!is_dev)
             return UACPI_STATUS_INVALID_ARGUMENT;
+    }
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
 
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
+    did_mask = gpe_mask_safe(event);
+
+    if (wake_device != UACPI_NULL) {
         switch (event->handler_type) {
         case GPE_HANDLER_TYPE_NONE:
             event->handler_type = GPE_HANDLER_TYPE_IMPLICIT_NOTIFY;
@@ -1486,7 +1786,8 @@ uacpi_status uacpi_setup_gpe_for_wake(
         default:
             uacpi_warn("invalid GPE(%02X) handler type: %d\n",
                        event->idx, event->handler_type);
-            return UACPI_STATUS_INTERNAL_ERROR;
+            ret = UACPI_STATUS_INTERNAL_ERROR;
+            goto out_unmask;
         }
 
         /*
@@ -1500,8 +1801,10 @@ uacpi_status uacpi_setup_gpe_for_wake(
             implicit_handler = event->implicit_handler;
 
             while (implicit_handler) {
-                if (implicit_handler->device == wake_device)
-                    return UACPI_STATUS_ALREADY_EXISTS;
+                if (implicit_handler->device == wake_device) {
+                    ret = UACPI_STATUS_ALREADY_EXISTS;
+                    goto out_unmask;
+                }
 
                 implicit_handler = implicit_handler->next;
             }
@@ -1521,7 +1824,13 @@ uacpi_status uacpi_setup_gpe_for_wake(
     }
 
     event->wake = UACPI_TRUE;
-    return UACPI_STATUS_OK;
+
+out_unmask:
+    if (did_mask)
+        gpe_mask_unmask(event, UACPI_FALSE);
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 static uacpi_status gpe_enable_disable_for_wake(
@@ -1533,12 +1842,20 @@ static uacpi_status gpe_enable_disable_for_wake(
     struct gpe_register *reg;
     uacpi_u8 mask;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
-    if (!event->wake)
-        return UACPI_STATUS_INVALID_ARGUMENT;
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
+    if (!event->wake) {
+        ret = UACPI_STATUS_INVALID_ARGUMENT;
+        goto out;
+    }
 
     reg = event->reg;
     mask = gpe_get_mask(event);
@@ -1548,7 +1865,9 @@ static uacpi_status gpe_enable_disable_for_wake(
     else
         reg->wake_mask &= mask;
 
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_enable_gpe_for_wake(
@@ -1565,13 +1884,6 @@ uacpi_status uacpi_disable_gpe_for_wake(
     return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_FALSE);
 }
 
-enum gpe_block_action {
-    GPE_BLOCK_ACTION_DISABLE_ALL,
-    GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
-    GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
-    GPE_BLOCK_ACTION_CLEAR_ALL,
-};
-
 struct do_for_all_gpes_ctx {
     enum gpe_block_action action;
     uacpi_status ret;
@@ -1582,39 +1894,28 @@ static uacpi_iteration_decision do_for_all_gpes(
 )
 {
     struct do_for_all_gpes_ctx *ctx = opaque;
-    struct gpe_register *reg;
-    uacpi_u8 value;
-    uacpi_size i;
 
-    for (i = 0; i < block->num_registers; ++i) {
-        reg = &block->registers[i];
+    ctx->ret = gpe_block_apply_action(block, ctx->action);
+    if (uacpi_unlikely_error(ctx->ret))
+        return UACPI_ITERATION_DECISION_BREAK;
 
-        switch (ctx->action) {
-        case GPE_BLOCK_ACTION_DISABLE_ALL:
-            value = 0;
-            break;
-        case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME:
-            value = reg->runtime_mask & ~reg->masked_mask;
-            break;
-        case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE:
-            value = reg->wake_mask;
-            break;
-        case GPE_BLOCK_ACTION_CLEAR_ALL:
-            ctx->ret = uacpi_gas_write(&reg->status, 0xFF);
-            if (uacpi_unlikely_error(ctx->ret))
-                return UACPI_ITERATION_DECISION_BREAK;
-            continue;
-        default:
-            continue;
-        }
+    return UACPI_ITERATION_DECISION_CONTINUE;
 
-        reg->current_mask = value;
-        ctx->ret = uacpi_gas_write(&reg->enable, value);
-        if (uacpi_unlikely_error(ctx->ret))
-            return UACPI_ITERATION_DECISION_BREAK;
-    }
+static uacpi_status for_all_gpes_locked(struct do_for_all_gpes_ctx *ctx)
+{
+    uacpi_status ret;
-    return UACPI_ITERATION_DECISION_CONTINUE;
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    for_each_gpe_block(do_for_all_gpes, ctx);
+
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ctx->ret;
 }
 
 uacpi_status uacpi_disable_all_gpes(void)
@@ -1622,9 +1923,7 @@ uacpi_status uacpi_disable_all_gpes(void)
     struct do_for_all_gpes_ctx ctx = {
         .action = GPE_BLOCK_ACTION_DISABLE_ALL,
     };
-
-    for_each_gpe_block(do_for_all_gpes, &ctx);
-    return ctx.ret;
+    return for_all_gpes_locked(&ctx);
 }
 
 uacpi_status uacpi_enable_all_runtime_gpes(void)
@@ -1632,9 +1931,7 @@ uacpi_status uacpi_enable_all_runtime_gpes(void)
     struct do_for_all_gpes_ctx ctx = {
         .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
    };
-
-    for_each_gpe_block(do_for_all_gpes, &ctx);
-    return ctx.ret;
+    return for_all_gpes_locked(&ctx);
 }
 
 uacpi_status uacpi_enable_all_wake_gpes(void)
@@ -1642,12 +1939,10 @@ uacpi_status uacpi_enable_all_wake_gpes(void)
     struct do_for_all_gpes_ctx ctx = {
         .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
     };
-
-    for_each_gpe_block(do_for_all_gpes, &ctx);
-    return ctx.ret;
+    return for_all_gpes_locked(&ctx);
 }
 
-static uacpi_status initialize_gpes()
+static uacpi_status initialize_gpes(void)
 {
     uacpi_status ret;
     uacpi_namespace_node *gpe_node;
@@ -1706,39 +2001,69 @@ uacpi_status uacpi_install_gpe_block(
     uacpi_address_space address_space, uacpi_u16 num_registers, uacpi_u32 irq
 )
 {
-    uacpi_object *obj;
+    uacpi_status ret;
+    uacpi_bool is_dev;
 
-    obj = uacpi_namespace_node_get_object(gpe_device);
-    if (obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE)
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+    if (!is_dev)
         return UACPI_STATUS_INVALID_ARGUMENT;
 
-    if (uacpi_unlikely(get_gpe(gpe_device, 0) != UACPI_NULL))
-        return UACPI_STATUS_ALREADY_EXISTS;
-    return create_gpe_block(
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    if (uacpi_unlikely(get_gpe(gpe_device, 0) != UACPI_NULL)) {
+        ret = UACPI_STATUS_ALREADY_EXISTS;
+        goto out;
+    }
+
+    ret = create_gpe_block(
         gpe_device, irq, 0, address, address_space, num_registers
     );
+
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_uninstall_gpe_block(
     uacpi_namespace_node *gpe_device
 )
 {
-    uacpi_object *obj;
+    uacpi_status ret;
+    uacpi_bool is_dev;
     struct gpe_search_ctx search_ctx = {
         .idx = 0,
         .gpe_device = gpe_device,
     };
 
-    obj = uacpi_namespace_node_get_object(gpe_device);
-    if (uacpi_unlikely(obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE))
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+    if (!is_dev)
         return UACPI_STATUS_INVALID_ARGUMENT;
 
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
     for_each_gpe_block(do_find_gpe, &search_ctx);
-    if (search_ctx.out_block == UACPI_NULL)
-        return UACPI_STATUS_NOT_FOUND;
+    if (search_ctx.out_block == UACPI_NULL) {
+        ret = UACPI_STATUS_NOT_FOUND;
+        goto out;
+    }
 
     uninstall_gpe_block(search_ctx.out_block);
-    return UACPI_STATUS_OK;
+
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 static uacpi_interrupt_ret handle_global_lock(uacpi_handle ctx)
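
uacpi_disable_all_gpes() and friends are the sleep-path helpers, so serializing them against normal GPE traffic matters. For context, a rough sketch of where they can sit in a host's S3 suspend flow — assuming the uacpi/sleep.h API names (uacpi_prepare_for_sleep_state(), uacpi_enter_sleep_state()); those calls may already sequence parts of this internally, so treat this as illustrative only:

```c
uacpi_prepare_for_sleep_state(UACPI_SLEEP_STATE_S3);

/* suspend devices, tear down interrupt routing, ... */

uacpi_disable_all_gpes();      /* quiesce runtime event sources */
uacpi_enable_all_wake_gpes();  /* arm only the wake-capable ones */

/* interrupts disabled from here on */
uacpi_enter_sleep_state(UACPI_SLEEP_STATE_S3);
```
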
@@ -1778,23 +2103,41 @@ static uacpi_interrupt_ret handle_sci(uacpi_handle ctx)
     return int_ret;
 }
 
-uacpi_status uacpi_initialize_events(void)
+uacpi_status uacpi_initialize_events_early(void)
 {
     uacpi_status ret;
 
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_OK;
 
+    g_gpe_state_slock = uacpi_kernel_create_spinlock();
+    if (uacpi_unlikely(g_gpe_state_slock == UACPI_NULL))
+        return UACPI_STATUS_OUT_OF_MEMORY;
+
+    ret = uacpi_recursive_lock_init(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
     ret = initialize_fixed_events();
     if (uacpi_unlikely_error(ret))
         return ret;
 
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_initialize_events(void)
+{
+    uacpi_status ret;
+
+    if (uacpi_is_hardware_reduced())
+        return UACPI_STATUS_OK;
+
     ret = initialize_gpes();
     if (uacpi_unlikely_error(ret))
         return ret;
 
     ret = uacpi_kernel_install_interrupt_handler(
-        g_uacpi_rt_ctx.fadt.sci_int, handle_sci, gpe_interrupt_head,
+        g_uacpi_rt_ctx.fadt.sci_int, handle_sci, g_gpe_interrupt_head,
         &g_uacpi_rt_ctx.sci_handle
     );
     if (uacpi_unlikely_error(ret)) {
@@ -1834,9 +2177,11 @@ uacpi_status uacpi_initialize_events(void)
 
 void uacpi_deinitialize_events(void)
 {
-    struct gpe_interrupt_ctx *ctx, *next_ctx = gpe_interrupt_head;
+    struct gpe_interrupt_ctx *ctx, *next_ctx = g_gpe_interrupt_head;
     uacpi_size i;
 
+    g_gpes_finalized = UACPI_FALSE;
+
     while (next_ctx) {
         ctx = next_ctx;
         next_ctx = ctx->next;
@@ -1854,7 +2199,14 @@ void uacpi_deinitialize_events(void)
             uacpi_uninstall_fixed_event_handler(i);
     }
 
-    gpe_interrupt_head = UACPI_NULL;
+    if (g_gpe_state_slock != UACPI_NULL) {
+        uacpi_kernel_free_spinlock(g_gpe_state_slock);
+        g_gpe_state_slock = UACPI_NULL;
+    }
+
+    uacpi_recursive_lock_deinit(&g_event_lock);
+
+    g_gpe_interrupt_head = UACPI_NULL;
 }
 
 uacpi_status uacpi_install_fixed_event_handler(
@@ -1865,15 +2217,23 @@
     uacpi_status ret;
     struct fixed_event_handler *ev;
 
-    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_OK;
 
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
     ev = &fixed_event_handlers[event];
 
-    if (ev->handler != UACPI_NULL)
-        return UACPI_STATUS_ALREADY_EXISTS;
+    if (ev->handler != UACPI_NULL) {
+        ret = UACPI_STATUS_ALREADY_EXISTS;
+        goto out;
+    }
 
     ev->handler = handler;
     ev->ctx = user;
@@ -1882,10 +2242,11 @@
     if (uacpi_unlikely_error(ret)) {
         ev->handler = UACPI_NULL;
         ev->ctx = UACPI_NULL;
-        return ret;
     }
 
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_uninstall_fixed_event_handler(
@@ -1895,21 +2256,31 @@
     uacpi_status ret;
     struct fixed_event_handler *ev;
 
-    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_OK;
 
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
     ev = &fixed_event_handlers[event];
 
     ret = set_event(event, UACPI_EVENT_DISABLED);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
+
+    uacpi_kernel_wait_for_work_completion();
 
     ev->handler = UACPI_NULL;
     ev->ctx = UACPI_NULL;
 
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_fixed_event_info(
@@ -1921,11 +2292,17 @@
     uacpi_u64 raw_value;
     uacpi_event_info info = 0;
 
-    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
     if (uacpi_is_hardware_reduced())
         return UACPI_STATUS_NOT_FOUND;
 
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
     if (fixed_event_handlers[event].handler != UACPI_NULL)
         info |= UACPI_EVENT_INFO_HAS_HANDLER;
 
@@ -1933,18 +2310,20 @@
 
     ret = uacpi_read_register_field(ev->enable_field, &raw_value);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
     if (raw_value)
         info |= UACPI_EVENT_INFO_ENABLED | UACPI_EVENT_INFO_HW_ENABLED;
 
     ret = uacpi_read_register_field(ev->status_field, &raw_value);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
     if (raw_value)
         info |= UACPI_EVENT_INFO_HW_STATUS;
 
     *out_info = info;
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 uacpi_status uacpi_gpe_info(
@@ -1958,10 +2337,16 @@
     uacpi_u64 raw_value;
     uacpi_event_info info = 0;
 
-    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
+    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
     if (event->handler_type != GPE_HANDLER_TYPE_NONE)
         info |= UACPI_EVENT_INFO_HAS_HANDLER;
 
@@ -1977,18 +2362,20 @@
 
     ret = uacpi_gas_read(&reg->enable, &raw_value);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
     if (raw_value & mask)
         info |= UACPI_EVENT_INFO_HW_ENABLED;
 
     ret = uacpi_gas_read(&reg->status, &raw_value);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
     if (raw_value & mask)
         info |= UACPI_EVENT_INFO_HW_STATUS;
 
     *out_info = info;
-    return UACPI_STATUS_OK;
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 
 #define PM1_STATUS_BITS ( \
@@ -2009,12 +2396,22 @@ uacpi_status uacpi_clear_all_events(void)
         .action = GPE_BLOCK_ACTION_CLEAR_ALL,
     };
 
-    ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS);
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+    ret = uacpi_recursive_lock_acquire(&g_event_lock);
     if (uacpi_unlikely_error(ret))
         return ret;
 
+    ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS);
+    if (uacpi_unlikely_error(ret))
+        goto out;
+
     for_each_gpe_block(do_for_all_gpes, &ctx);
-    return ctx.ret;
+    ret = ctx.ret;
+
+out:
+    uacpi_recursive_lock_release(&g_event_lock);
+    return ret;
 }
 #endif
diff --git a/source/interpreter.c b/source/interpreter.c
index 4c279fac..0454f6ad 100644
--- a/source/interpreter.c
+++ b/source/interpreter.c
@@ -730,7 +730,7 @@ static uacpi_status handle_buffer(struct execution_context *ctx)
     return UACPI_STATUS_OK;
 }
 
-uacpi_status handle_string(struct execution_context *ctx)
+static uacpi_status handle_string(struct execution_context *ctx)
 {
     struct call_frame *frame = ctx->cur_frame;
     uacpi_object *obj;
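
The interpreter.c hunks below, like the (void) fixes in the headers earlier, exist to satisfy -Wstrict-prototypes, which the test runner's CMakeLists.txt now enables for C. In pre-C23 C an empty parameter list means "unspecified arguments", not "no arguments", so the old declarations silently accepted garbage:

```c
static uacpi_u64 ones();      /* old style: argument checking disabled */
ones(1, 2, 3);                /* compiles without a diagnostic */

static uacpi_u64 ones(void);  /* real prototype */
ones(1, 2, 3);                /* error: too many arguments */
```
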
@@ -879,7 +879,7 @@ static uacpi_size field_byte_size(uacpi_object *obj)
     return uacpi_round_up_bits_to_bytes(bit_length);
 }
 
-static uacpi_size sizeof_int()
+static uacpi_size sizeof_int(void)
 {
     return g_uacpi_rt_ctx.is_rev1 ? 4 : 8;
 }
@@ -1271,7 +1271,7 @@ static uacpi_status do_load_table(
         return ret;
 
     if (is_dynamic_table_load(cause))
-        ret = uacpi_events_match_post_dynamic_table_load();
+        uacpi_events_match_post_dynamic_table_load();
 
     return ret;
 }
@@ -1537,7 +1537,7 @@ uacpi_status uacpi_execute_table(void *tbl, enum uacpi_table_load_cause cause)
     return ret;
 }
 
-uacpi_u32 get_field_length(struct item *item)
+static uacpi_u32 get_field_length(struct item *item)
 {
     struct package_length *pkg = &item->pkg;
     return pkg->end - pkg->begin;
 }
@@ -1847,7 +1847,7 @@ static void truncate_number_if_needed(uacpi_object *obj)
         obj->integer &= 0xFFFFFFFF;
 }
 
-static uacpi_u64 ones()
+static uacpi_u64 ones(void)
 {
     return g_uacpi_rt_ctx.is_rev1 ? 0xFFFFFFFF : 0xFFFFFFFFFFFFFFFF;
 }
@@ -2151,7 +2151,7 @@ static uacpi_status debug_store(uacpi_object *src)
 /*
  * NOTE: this function returns the parent object
  */
-uacpi_object *reference_unwind(uacpi_object *obj)
+static uacpi_object *reference_unwind(uacpi_object *obj)
 {
     uacpi_object *parent = obj;
 
diff --git a/source/namespace.c b/source/namespace.c
index 352ca473..585d1df2 100644
--- a/source/namespace.c
+++ b/source/namespace.c
@@ -124,12 +124,23 @@ static uacpi_object *make_object_for_predefined(
     return obj;
 }
 
-static void free_namespace_node(uacpi_handle handle)
+static void namespace_node_detach_object(uacpi_namespace_node *node)
 {
-    uacpi_namespace_node *node = handle;
+    uacpi_object *object;
+
+    object = uacpi_namespace_node_get_object(node);
+    if (object != UACPI_NULL) {
+        if (object->type == UACPI_OBJECT_OPERATION_REGION)
+            uacpi_opregion_uninstall_handler(node);
 
-    if (node->object)
         uacpi_object_unref(node->object);
+        node->object = UACPI_NULL;
+    }
+}
+
+static void free_namespace_node(uacpi_handle handle)
+{
+    uacpi_namespace_node *node = handle;
 
     if (uacpi_likely(!uacpi_namespace_node_is_predefined(node))) {
         uacpi_free(node, sizeof(*node));
@@ -194,11 +205,14 @@ uacpi_status uacpi_initialize_namespace(void)
 
 void uacpi_deinitialize_namespace(void)
 {
+    uacpi_status ret;
     uacpi_namespace_node *current, *next = UACPI_NULL;
     uacpi_u32 depth = 1;
 
     current = uacpi_namespace_root();
 
+    ret = uacpi_namespace_write_lock();
+
     while (depth) {
         next = next == UACPI_NULL ? current->child : next->next;
 
@@ -233,13 +247,18 @@ void uacpi_deinitialize_namespace(void)
         // This node has no children, move on to its peer
     }
 
+    namespace_node_detach_object(uacpi_namespace_root());
+    free_namespace_node(uacpi_namespace_root());
+
+    if (ret == UACPI_STATUS_OK)
+        uacpi_namespace_write_unlock();
+
     uacpi_object_unref(g_uacpi_rt_ctx.root_object);
     g_uacpi_rt_ctx.root_object = UACPI_NULL;
 
     uacpi_mutex_unref(g_uacpi_rt_ctx.global_lock_mutex);
     g_uacpi_rt_ctx.global_lock_mutex = UACPI_NULL;
 
-    free_namespace_node(uacpi_namespace_root());
     uacpi_rw_lock_deinit(&namespace_lock);
 }
 
@@ -330,7 +349,6 @@ uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node)
 uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node)
 {
     uacpi_namespace_node *prev;
-    uacpi_object *object;
 
     if (uacpi_unlikely(uacpi_namespace_node_is_dangling(node))) {
         uacpi_warn("attempting to uninstall a dangling namespace node %.4s\n",
@@ -398,14 +416,7 @@ uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node)
      * namespace node as well as potential infinite cycles between a namespace
      * node and an object.
      */
-    object = uacpi_namespace_node_get_object(node);
-    if (object != UACPI_NULL) {
-        if (object->type == UACPI_OBJECT_OPERATION_REGION)
-            uacpi_opregion_uninstall_handler(node);
-
-        uacpi_object_unref(node->object);
-        node->object = UACPI_NULL;
-    }
+    namespace_node_detach_object(node);
 
     prev = node->parent ? node->parent->child : UACPI_NULL;
 
diff --git a/source/opregion.c b/source/opregion.c
index d906054c..7f23c11a 100644
--- a/source/opregion.c
+++ b/source/opregion.c
@@ -426,7 +426,7 @@ struct reg_run_ctx {
     uacpi_size reg_errors;
 };
 
-uacpi_iteration_decision do_run_reg(
+static uacpi_iteration_decision do_run_reg(
     void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
 )
 {
diff --git a/source/registers.c b/source/registers.c
index 5e5d293a..234e7590 100644
--- a/source/registers.c
+++ b/source/registers.c
@@ -351,6 +351,25 @@ static const struct register_field fields[UACPI_REGISTER_FIELD_MAX + 1] = {
     },
 };
 
+static uacpi_handle g_reg_lock;
+
+uacpi_status uacpi_initialize_registers(void)
+{
+    g_reg_lock = uacpi_kernel_create_spinlock();
+    if (uacpi_unlikely(g_reg_lock == UACPI_NULL))
+        return UACPI_STATUS_OUT_OF_MEMORY;
+
+    return UACPI_STATUS_OK;
+}
+
+void uacpi_deinitialize_registers(void)
+{
+    if (g_reg_lock != UACPI_NULL) {
+        uacpi_kernel_free_spinlock(g_reg_lock);
+        g_reg_lock = UACPI_NULL;
+    }
+}
+
 uacpi_status uacpi_read_register_field(
     enum uacpi_register_field field_enum, uacpi_u64 *out_value
 )
@@ -383,6 +402,7 @@ uacpi_status uacpi_write_register_field(
     const struct register_field *field;
     const struct register_spec *reg;
     uacpi_u64 data;
+    uacpi_cpu_flags flags;
 
     if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
         return UACPI_STATUS_INVALID_ARGUMENT;
@@ -392,19 +412,28 @@
 
     in_value = (in_value << field->offset) & field->mask;
 
+    flags = uacpi_kernel_lock_spinlock(g_reg_lock);
+
     if (reg->kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) {
-        if (in_value == 0)
-            return UACPI_STATUS_OK;
+        if (in_value == 0) {
+            ret = UACPI_STATUS_OK;
+            goto out;
+        }
 
-        return do_write_register(reg, in_value);
+        ret = do_write_register(reg, in_value);
+        goto out;
     }
 
     ret = do_read_register(reg, &data);
     if (uacpi_unlikely_error(ret))
-        return ret;
+        goto out;
 
     data &= ~field->mask;
     data |= in_value;
 
-    return do_write_register(reg, data);
+    ret = do_write_register(reg, data);
+
+out:
+    uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
+    return ret;
 }
diff --git a/source/tables.c b/source/tables.c
index a30b0705..f5a178f7 100644
--- a/source/tables.c
+++ b/source/tables.c
@@ -9,7 +9,7 @@ DYNAMIC_ARRAY_WITH_INLINE_STORAGE(
     table_array, struct uacpi_installed_table, UACPI_STATIC_TABLE_ARRAY_LEN
 )
 DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
-    table_array, struct uacpi_installed_table,
+    table_array, struct uacpi_installed_table, static
 )
 
 static struct table_array tables;
diff --git a/source/types.c b/source/types.c
index d95cc97c..29f77d70 100644
--- a/source/types.c
+++ b/source/types.c
@@ -1016,7 +1016,7 @@ uacpi_status uacpi_object_assign_integer(uacpi_object *obj, uacpi_u64 value)
     }, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
 }
 
-uacpi_status uacpi_object_do_get_string_or_buffer(
+static uacpi_status uacpi_object_do_get_string_or_buffer(
     uacpi_object *obj, uacpi_data_view *out, uacpi_u32 mask
 )
 {
@@ -1166,7 +1166,7 @@ uacpi_object *uacpi_object_create_integer(uacpi_u64 value)
     return obj;
 }
 
-uacpi_object *uacpi_object_do_create_string_or_buffer(
+static uacpi_object *uacpi_object_do_create_string_or_buffer(
     uacpi_data_view view, uacpi_object_type type
 )
 {
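
One note on the registers.c hunk above: uacpi_write_register_field() now does its read-modify-write under g_reg_lock, but the write-to-clear shortcut stays outside the RMW on purpose, not just as an optimization:

```c
/*
 * PM1_STS-style registers are write-one-to-clear: every bit written as 1
 * acknowledges that pending event. Reading the register and writing the
 * value back would ack *all* pending events, so WRITE_TO_CLEAR fields
 * write only the requested field bits (or nothing when the value is 0).
 */
```
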
diff --git a/source/uacpi.c b/source/uacpi.c
index e7a79bde..a49a4e5e 100644
--- a/source/uacpi.c
+++ b/source/uacpi.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 
 struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 };
 
@@ -23,6 +24,7 @@ void uacpi_state_reset(void)
     uacpi_deinitialize_events();
     uacpi_deinitialize_notify();
     uacpi_deinitialize_opregion();
+    uacpi_deinitialize_registers();
     uacpi_deinitialize_tables();
 
 #ifndef UACPI_REDUCED_HARDWARE
@@ -297,6 +299,14 @@ uacpi_status uacpi_initialize(uacpi_u64 flags)
     if (uacpi_unlikely_error(ret))
         goto out_fatal_error;
 
+    ret = uacpi_initialize_registers();
+    if (uacpi_unlikely_error(ret))
+        goto out_fatal_error;
+
+    ret = uacpi_initialize_events_early();
+    if (uacpi_unlikely_error(ret))
+        goto out_fatal_error;
+
     ret = uacpi_initialize_opregion();
     if (uacpi_unlikely_error(ret))
         goto out_fatal_error;
diff --git a/source/utilities.c b/source/utilities.c
index 1a30a870..33da1c12 100644
--- a/source/utilities.c
+++ b/source/utilities.c
@@ -928,7 +928,7 @@ struct device_find_ctx {
     uacpi_iteration_callback cb;
 };
 
-uacpi_iteration_decision find_one_device(
+static uacpi_iteration_decision find_one_device(
     void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
 )
 {
diff --git a/tests/runner/CMakeLists.txt b/tests/runner/CMakeLists.txt
index 4ee2d6af..ade867f0 100644
--- a/tests/runner/CMakeLists.txt
+++ b/tests/runner/CMakeLists.txt
@@ -97,6 +97,9 @@ else ()
         PRIVATE -fsanitize=address,undefined -g3
     )
+    add_compile_options(
+        $<$<COMPILE_LANGUAGE:C>:-Wstrict-prototypes>
+    )
 endif ()
 
 find_package(Threads REQUIRED)
diff --git a/tests/runner/interface_impl.cpp b/tests/runner/interface_impl.cpp
index 0043903e..badd291a 100644
--- a/tests/runner/interface_impl.cpp
+++ b/tests/runner/interface_impl.cpp
@@ -49,6 +49,7 @@ uacpi_status uacpi_kernel_initialize(uacpi_init_level lvl)
 void uacpi_kernel_deinitialize(void)
 {
     delete[] io_space;
+    io_space = nullptr;
 }
 #endif
 
diff --git a/tests/runner/test_runner.cpp b/tests/runner/test_runner.cpp
index d4a5628e..8fe9e8b8 100644
--- a/tests/runner/test_runner.cpp
+++ b/tests/runner/test_runner.cpp
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 
 void run_resource_tests();
 
@@ -272,6 +273,14 @@ static uacpi_status handle_ec(uacpi_region_op op, uacpi_handle op_data)
     }
 }
 
+static uacpi_interrupt_ret handle_gpe(
+    uacpi_handle, uacpi_namespace_node *, uacpi_u16
+)
+{
+    std::cout << "got a GPE" << std::endl;
+    return UACPI_INTERRUPT_HANDLED | UACPI_GPE_REENABLE;
+}
+
 static void ensure_ok_status(uacpi_status st)
 {
     if (st == UACPI_STATUS_OK)
@@ -507,6 +516,20 @@ static void run_test(
     );
     ensure_ok_status(st);
 
+    st = uacpi_install_gpe_handler(
+        UACPI_NULL, 123, UACPI_GPE_TRIGGERING_EDGE, handle_gpe, UACPI_NULL
+    );
+    ensure_ok_status(st);
+
+    st = uacpi_enable_gpe(UACPI_NULL, 123);
+    ensure_ok_status(st);
+
+    st = uacpi_disable_gpe(UACPI_NULL, 123);
+    ensure_ok_status(st);
+
+    st = uacpi_uninstall_gpe_handler(UACPI_NULL, 123, handle_gpe);
+    ensure_ok_status(st);
+
     st = uacpi_namespace_initialize();
     ensure_ok_status(st);
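
Hosts that previously stubbed out the spinlock callbacks now have them on hot paths: g_gpe_state_slock and g_reg_lock are taken around GPE and register read-modify-writes, potentially from interrupt context. A minimal user-space sketch of the four callbacks, assuming POSIX threads (a kernel port would save and restore its IRQ state in uacpi_cpu_flags instead of returning 0):

```c
#include <pthread.h>
#include <stdlib.h>
#include <uacpi/kernel_api.h>

uacpi_handle uacpi_kernel_create_spinlock(void)
{
    pthread_spinlock_t *lock = malloc(sizeof(*lock));

    if (lock != NULL && pthread_spin_init(lock, PTHREAD_PROCESS_PRIVATE) != 0) {
        free(lock);
        lock = NULL;
    }
    return lock; /* NULL is treated as an allocation failure by uacpi */
}

uacpi_cpu_flags uacpi_kernel_lock_spinlock(uacpi_handle handle)
{
    pthread_spin_lock(handle);
    return 0; /* nothing to save in user space */
}

void uacpi_kernel_unlock_spinlock(uacpi_handle handle, uacpi_cpu_flags flags)
{
    (void)flags;
    pthread_spin_unlock(handle);
}

void uacpi_kernel_free_spinlock(uacpi_handle handle)
{
    pthread_spin_destroy(handle);
    free(handle);
}
```
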