tevent-0.11.0/ABI/tevent-0.10.0.sigs:
_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t)
tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate *, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) 
tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile *(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/ABI/tevent-0.10.1.sigs:
_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate
*, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile 
*(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/ABI/tevent-0.10.2.sigs:
_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct
tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate *, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool 
(struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile *(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) 
tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/ABI/tevent-0.11.0.sigs:
_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval,
tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate *, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) 
tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_get_tag: uint64_t (const struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_fd_set_tag: void (struct tevent_fd *, uint64_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_get_trace_fd_callback: void (struct tevent_context *, tevent_trace_fd_callback_t *, void *) tevent_get_trace_immediate_callback: void (struct tevent_context *, tevent_trace_immediate_callback_t *, void *) tevent_get_trace_signal_callback: void (struct tevent_context *, tevent_trace_signal_callback_t *, void *) tevent_get_trace_timer_callback: void (struct tevent_context *, tevent_trace_timer_callback_t *, void *) tevent_immediate_get_tag: uint64_t (const struct tevent_immediate *) tevent_immediate_set_tag: void (struct tevent_immediate *, uint64_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: 
struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile *(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_set_trace_fd_callback: void (struct tevent_context *, tevent_trace_fd_callback_t, void *) tevent_set_trace_immediate_callback: void (struct tevent_context *, tevent_trace_immediate_callback_t, void *) tevent_set_trace_signal_callback: void (struct tevent_context *, tevent_trace_signal_callback_t, void *) tevent_set_trace_timer_callback: void (struct tevent_context *, tevent_trace_timer_callback_t, void *) tevent_signal_get_tag: uint64_t (const struct tevent_signal *) tevent_signal_set_tag: void (struct tevent_signal *, uint64_t) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timer_get_tag: uint64_t (const struct tevent_timer *) tevent_timer_set_tag: void (struct tevent_timer *, uint64_t) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: 
bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_fd_callback: void (struct tevent_context *, struct tevent_fd *, enum tevent_event_trace_point) tevent_trace_immediate_callback: void (struct tevent_context *, struct tevent_immediate *, enum tevent_event_trace_point) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_trace_signal_callback: void (struct tevent_context *, struct tevent_signal *, enum tevent_event_trace_point) tevent_trace_timer_callback: void (struct tevent_context *, struct tevent_timer *, enum tevent_event_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/ABI/tevent-0.9.10.sigs:
_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *)
tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/ABI/tevent-0.9.11.sigs:
_tevent_add_fd: struct tevent_fd *(struct
tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/ABI/tevent-0.9.12.sigs:
_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct
tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.13.sigs0000660000000000000000000001317200000000000016354 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct 
tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.14.sigs0000660000000000000000000001375600000000000016365 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) 
_tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.15.sigs0000660000000000000000000001375600000000000016366 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct 
tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.16.sigs0000660000000000000000000001453500000000000016363 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct 
tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.17.sigs0000660000000000000000000001453500000000000016364 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, 
TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.18.sigs0000660000000000000000000001500200000000000016353 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, 
TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.19.sigs0000660000000000000000000001500200000000000016354 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, 
TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.20.sigs0000660000000000000000000001535300000000000016355 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, 
TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) 
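[Editor's illustrative note, not part of the ABI files.] The .sigs files above and below only record prototypes, not usage. As an illustration only, the following minimal sketch shows how a few of the listed entry points fit together. It assumes the public convenience macros from tevent.h (tevent_add_timer, tevent_loop_once), which expand to the underscore-prefixed _tevent_add_timer and _tevent_loop_once symbols recorded in these lists, and it links against -ltevent -ltalloc.

#include <stdbool.h>
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

/* Handler matching tevent_timer_handler_t, the type taken by _tevent_add_timer. */
static void timer_handler(struct tevent_context *ev,
			  struct tevent_timer *te,
			  struct timeval current_time,
			  void *private_data)
{
	bool *done = (bool *)private_data;
	*done = true;
	printf("timer fired\n");
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev;
	struct tevent_timer *te;
	bool done = false;

	/* tevent_context_init: struct tevent_context *(TALLOC_CTX *) */
	ev = tevent_context_init(mem_ctx);
	if (ev == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* tevent_add_timer() is the public macro wrapping _tevent_add_timer();
	 * tevent_timeval_current_ofs(1, 0) means "one second from now". */
	te = tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
			      timer_handler, &done);
	if (te == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* tevent_loop_once() wraps _tevent_loop_once() and processes one pending event. */
	while (!done) {
		tevent_loop_once(ev);
	}

	talloc_free(mem_ctx);
	return 0;
}

The tevent_req-based entries in these lists (tevent_wakeup_send/tevent_wakeup_recv, tevent_queue_wait_send/tevent_queue_wait_recv, tevent_req_set_callback, tevent_req_poll, ...) follow the same _send/_recv convention: the _send call returns a struct tevent_req driven by the event loop, and the matching _recv call collects the result once the request has completed.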
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.21.sigs0000660000000000000000000001547000000000000016356 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: 
void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) 
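The tevent_fd_* entries listed here pair with the _tevent_add_fd entry point. A short sketch, assuming the tevent_add_fd() wrapper macro and the TEVENT_FD_READ flag from tevent.h, of watching a file descriptor; the function names and the buffer size are illustrative.

    #include <unistd.h>
    #include <talloc.h>
    #include <tevent.h>

    /* Matches tevent_fd_handler_t. */
    static void on_readable(struct tevent_context *ev, struct tevent_fd *fde,
                            uint16_t flags, void *private_data)
    {
            int fd = *(int *)private_data;
            char buf[256];

            if (flags & TEVENT_FD_READ) {
                    ssize_t n = read(fd, buf, sizeof(buf));
                    if (n <= 0) {
                            talloc_free(fde); /* stop monitoring on EOF/error */
                    }
            }
    }

    static struct tevent_fd *watch_fd(struct tevent_context *ev,
                                      TALLOC_CTX *mem_ctx, int *fd)
    {
            struct tevent_fd *fde = tevent_add_fd(ev, mem_ctx, *fd,
                                                  TEVENT_FD_READ,
                                                  on_readable, fd);
            if (fde != NULL) {
                    /* close(*fd) automatically when fde is freed */
                    tevent_fd_set_auto_close(fde);
            }
            return fde;
    }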
tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.22.sigs0000660000000000000000000001547000000000000016357 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) 
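The listings also export the backend plumbing (tevent_backend_list, tevent_set_default_backend, tevent_context_init_byname). A small illustrative sketch of selecting a backend explicitly; the "poll" name is one of the backends tevent conventionally registers and is an assumption here, not something recorded in these files.

    #include <stdio.h>
    #include <talloc.h>
    #include <tevent.h>

    static struct tevent_context *init_with_backend(TALLOC_CTX *mem_ctx)
    {
            const char **backends = tevent_backend_list(mem_ctx);
            for (int i = 0; backends != NULL && backends[i] != NULL; i++) {
                    printf("available backend: %s\n", backends[i]);
            }

            /* Either set the process-wide default ... */
            tevent_set_default_backend("poll");

            /* ... or request one backend by name for this context only. */
            return tevent_context_init_byname(mem_ctx, "poll");
    }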
tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) 
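The _tevent_req_* entries above are the building blocks of the tevent_req "send/recv" computation pattern. A sketch, assuming the tevent_req_create()/tevent_req_data() wrapper macros from tevent.h, of a toy asynchronous call whose work is simulated by a timer; echo_send/echo_recv and struct echo_state are invented names.

    #include <errno.h>
    #include <stdbool.h>
    #include <talloc.h>
    #include <tevent.h>

    struct echo_state {
            const char *msg;
    };

    static void echo_done(struct tevent_context *ev, struct tevent_timer *te,
                          struct timeval now, void *private_data)
    {
            struct tevent_req *req = talloc_get_type_abort(private_data,
                                                           struct tevent_req);
            tevent_req_done(req);           /* wakes the caller's callback */
    }

    /* _send: allocate the request, start the async work, return at once. */
    static struct tevent_req *echo_send(TALLOC_CTX *mem_ctx,
                                        struct tevent_context *ev,
                                        const char *msg)
    {
            struct tevent_req *req;
            struct echo_state *state;

            req = tevent_req_create(mem_ctx, &state, struct echo_state);
            if (req == NULL) {
                    return NULL;
            }
            state->msg = msg;

            if (tevent_add_timer(ev, state, tevent_timeval_current_ofs(1, 0),
                                 echo_done, req) == NULL) {
                    tevent_req_error(req, ENOMEM);
                    return tevent_req_post(req, ev); /* deliver via the loop */
            }
            return req;
    }

    /* _recv: report the result and mark the request as consumed. */
    static bool echo_recv(struct tevent_req *req, int *perr)
    {
            enum tevent_req_state state;
            uint64_t err;

            if (tevent_req_is_error(req, &state, &err)) {
                    *perr = (int)err;
                    return false;
            }
            tevent_req_received(req);
            return true;
    }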
tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.23.sigs0000660000000000000000000001547000000000000016360 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: 
struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct 
timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.24.sigs0000660000000000000000000001547000000000000016361 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct 
tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const 
struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.25.sigs0000660000000000000000000001547000000000000016362 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct 
tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) 
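The tevent_queue_* entries listed here serialise tevent_req operations. A minimal sketch, assuming the tevent_queue_create() wrapper macro; the queue name, the trigger function, and the stop/start sequencing are illustrative choices, not the only way to use the API.

    #include <stdbool.h>
    #include <talloc.h>
    #include <tevent.h>

    /* Matches tevent_queue_trigger_fn_t: called when req reaches the head
     * of a running queue. */
    static void start_io(struct tevent_req *req, void *private_data)
    {
            /* ... begin the actual asynchronous work for req here ... */
    }

    static bool enqueue(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
                        struct tevent_req *req)
    {
            struct tevent_queue *q = tevent_queue_create(mem_ctx, "io-queue");
            bool ok;

            if (q == NULL) {
                    return false;
            }

            tevent_queue_stop(q);                   /* hold triggers back */
            ok = tevent_queue_add(q, ev, req, start_io, NULL);
            tevent_queue_start(q);                  /* release the queue */

            return ok;
    }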
tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.26.sigs0000660000000000000000000001601400000000000016356 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) 
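The tevent_timeval_* helpers are plain value utilities over struct timeval. A short sketch of computing and testing a deadline with them; the 30-second budget is made up for the example.

    #include <stdbool.h>
    #include <tevent.h>

    static struct timeval make_deadline(void)
    {
            /* "now + 30s", directly ... */
            struct timeval d = tevent_timeval_current_ofs(30, 0);

            /* ... or equivalently via explicit addition. */
            struct timeval now = tevent_timeval_current();
            struct timeval d2 = tevent_timeval_add(&now, 30, 0);
            (void)d2;

            return d;
    }

    static bool deadline_expired(const struct timeval *deadline)
    {
            struct timeval now = tevent_timeval_current();
            /* compare() returns <0 when the first argument is earlier. */
            return tevent_timeval_compare(deadline, &now) < 0;
    }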
tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) 
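tevent-0.9.26 is the first listing in this set to export tevent_thread_proxy_create and tevent_thread_proxy_schedule. A sketch, under the assumption that the result object is a talloc pointer whose ownership the proxy takes over (both the immediate and the private data are passed by reference, per the signatures), of handing a value from a worker thread back to the thread that owns the event context. Names and the value 42 are illustrative.

    #include <talloc.h>
    #include <tevent.h>

    /* Runs on the event-context thread once the proxy delivers it. */
    static void on_worker_result(struct tevent_context *ev,
                                 struct tevent_immediate *im,
                                 void *private_data)
    {
            int *result = talloc_get_type_abort(private_data, int);
            /* ... consume *result ... */
            talloc_free(result);
    }

    struct worker_args {
            struct tevent_thread_proxy *proxy;   /* created on the main thread
                                                  * with tevent_thread_proxy_create() */
    };

    /* Intended to run on a worker thread. */
    static void *worker(void *arg)
    {
            struct worker_args *wa = arg;
            struct tevent_immediate *im = tevent_create_immediate(NULL);
            int *result = talloc(NULL, int);

            *result = 42;

            /* The proxy takes ownership of *im and *result and NULLs the
             * caller's pointers; the handler later runs on the main loop. */
            tevent_thread_proxy_schedule(wa->proxy, &im,
                                         on_worker_result, &result);
            return NULL;
    }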
tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.27.sigs0000660000000000000000000001601400000000000016357 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) 
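tevent_wakeup_send/tevent_wakeup_recv, listed above, are the simplest tevent_req producer: a request that completes at a given time. A sketch of waiting on one synchronously with tevent_req_poll; the helper name and talloc frame are illustrative.

    #include <stdbool.h>
    #include <talloc.h>
    #include <tevent.h>

    /* Block the caller for "secs" seconds by driving the event loop. */
    static bool sleep_via_tevent(struct tevent_context *ev, uint32_t secs)
    {
            TALLOC_CTX *frame = talloc_new(ev);
            struct tevent_req *req;
            bool ok = false;

            req = tevent_wakeup_send(frame, ev,
                                     tevent_timeval_current_ofs(secs, 0));
            if (req == NULL) {
                    goto done;
            }

            /* tevent_req_poll() runs the loop until req completes. */
            if (!tevent_req_poll(req, ev)) {
                    goto done;
            }
            ok = tevent_wakeup_recv(req);
    done:
            talloc_free(frame);
            return ok;
    }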
tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: 
int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.28.sigs0000660000000000000000000001601400000000000016360 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, 
const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) 
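The signal-related entries (_tevent_add_signal, tevent_common_check_signal, tevent_signal_support) back the tevent_add_signal() wrapper. A sketch of installing a SIGHUP handler; the choice of SIGHUP and sa_flags of 0 are illustrative.

    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <talloc.h>
    #include <tevent.h>

    /* Matches tevent_signal_handler_t. */
    static void on_sighup(struct tevent_context *ev, struct tevent_signal *se,
                          int signum, int count, void *siginfo,
                          void *private_data)
    {
            printf("got SIGHUP %d time(s)\n", count);
    }

    static bool install_sighup(struct tevent_context *ev, TALLOC_CTX *mem_ctx)
    {
            if (!tevent_signal_support(ev)) {
                    return false;   /* backend cannot deliver signal events */
            }
            return tevent_add_signal(ev, mem_ctx, SIGHUP, 0,
                                     on_sighup, NULL) != NULL;
    }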
tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.29.sigs0000660000000000000000000001601400000000000016361 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const 
char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) 
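tevent_debug, tevent_set_debug and the trace-callback entries above provide the library's diagnostics hooks. A sketch wiring both up; the level filter and the use of TEVENT_TRACE_BEFORE_WAIT (a long-standing trace point, assumed here rather than taken from these files) are illustrative.

    #include <stdarg.h>
    #include <stdio.h>
    #include <tevent.h>

    /* Matches the debug callback type taken by tevent_set_debug(). */
    static void debug_to_stderr(void *context, enum tevent_debug_level level,
                                const char *fmt, va_list ap)
    {
            if (level <= TEVENT_DEBUG_WARNING) {
                    vfprintf(stderr, fmt, ap);
            }
    }

    /* Matches tevent_trace_callback_t. */
    static void trace_cb(enum tevent_trace_point point, void *private_data)
    {
            unsigned *waits = private_data;

            if (point == TEVENT_TRACE_BEFORE_WAIT) {
                    (*waits)++;     /* count trips into the backend wait */
            }
    }

    static void enable_diagnostics(struct tevent_context *ev, unsigned *waits)
    {
            tevent_set_debug(ev, debug_to_stderr, NULL);
            tevent_set_trace_callback(ev, trace_cb, waits);
    }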
tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.30.sigs0000660000000000000000000001701500000000000016353 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, 
tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: 
struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.31.sigs0000660000000000000000000001724700000000000016363 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct 
tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context 
*) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.32.sigs0000660000000000000000000001724700000000000016364 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char 
*) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
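Taken together, the context, timer and loop symbols listed above (tevent_context_init, _tevent_add_timer, tevent_timeval_current_ofs, _tevent_loop_once/_tevent_loop_wait) make up the basic event-loop workflow. The following is an illustrative sketch only, written against the public tevent.h macros; the handler name and the one-second timeout are arbitrary.

#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

static void timer_handler(struct tevent_context *ev,
			  struct tevent_timer *te,
			  struct timeval current_time,
			  void *private_data)
{
	int *fired = private_data;

	*fired = 1;
	printf("timer fired\n");
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_timer *te;
	int fired = 0;

	/* One-shot timer, one second from now. */
	te = tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
			      timer_handler, &fired);
	if (te == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	while (!fired) {
		tevent_loop_once(ev);	/* run one loop iteration */
	}

	talloc_free(mem_ctx);		/* frees the context and the timer */
	return 0;
}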
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct 
timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.33.sigs0000660000000000000000000001724700000000000016365 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, 
uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void 
*, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.34.sigs0000660000000000000000000001724700000000000016366 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct 
tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
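The threaded helpers present in these listings (tevent_threaded_context_create together with the _tevent_threaded_schedule_immediate entry point behind the tevent_threaded_schedule_immediate() macro) exist to hand work from a helper thread back to a tevent loop running in another thread. The sketch below is illustrative only; the worker_args glue struct, worker() and on_work_done() are hypothetical, and error handling is omitted.

#include <pthread.h>
#include <talloc.h>
#include <tevent.h>

struct worker_args {				/* hypothetical glue struct */
	struct tevent_threaded_context *tctx;
	struct tevent_immediate *im;
};

/* Runs in the main thread, from inside the tevent loop. */
static void on_work_done(struct tevent_context *ev,
			 struct tevent_immediate *im,
			 void *private_data)
{
	/* pick up results prepared by the worker thread */
}

static void *worker(void *arg)
{
	struct worker_args *w = arg;

	/* ... blocking work, off the event loop ... */

	/* Post an immediate event back into the main loop; this call is
	 * intended to be made from a foreign thread. */
	tevent_threaded_schedule_immediate(w->tctx, w->im,
					   on_work_done, NULL);
	return NULL;
}

/* Main-thread setup. */
static void start_worker(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
			 struct worker_args *w)
{
	pthread_t t;

	w->tctx = tevent_threaded_context_create(mem_ctx, ev);
	w->im = tevent_create_immediate(mem_ctx);
	pthread_create(&t, NULL, worker, w);
	pthread_detach(t);
}

on_work_done() then runs in the thread that drives ev, so it can safely touch state owned by the event loop.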
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct 
timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.35.sigs0000660000000000000000000001724700000000000016367 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, 
uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void 
*, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.36.sigs0000660000000000000000000001735000000000000016363 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct 
tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) 
tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.37.sigs0000660000000000000000000002372200000000000016364 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, 
TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate *, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile *(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, 
tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.38.sigs0000660000000000000000000002372200000000000016365 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: 
void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate *, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile *(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, 
tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.39.sigs0000660000000000000000000002372200000000000016366 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_context_pop_use: void (struct tevent_context *, const char *) _tevent_context_push_use: bool (struct tevent_context *, const char *) _tevent_context_wrapper_create: struct tevent_context *(struct tevent_context *, TALLOC_CTX *, const struct tevent_wrapper_ops *, void *, size_t, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: 
void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_req_oom: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) _tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_abort: void (struct tevent_context *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_double_free: void (TALLOC_CTX *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_have_events: bool (struct tevent_context *) tevent_common_invoke_fd_handler: int (struct tevent_fd *, uint16_t, bool *) tevent_common_invoke_immediate_handler: int (struct tevent_immediate *, bool *) tevent_common_invoke_signal_handler: int (struct tevent_signal *, int, int, void *, bool *) tevent_common_invoke_timer_handler: int (struct tevent_timer *, struct timeval, bool *) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_common_threaded_activate_immediate: void (struct tevent_context *) tevent_common_wakeup: int (struct tevent_context *) tevent_common_wakeup_fd: int (int) tevent_common_wakeup_init: int (struct tevent_context *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *) tevent_context_is_wrapper: bool (struct tevent_context *) tevent_context_same_loop: bool (struct tevent_context *, struct tevent_context *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) 
tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_num_signals: size_t (void) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_entry_untrigger: void (struct tevent_queue_entry *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_running: bool (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_queue_wait_recv: bool (struct tevent_req *) tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *) tevent_req_get_profile: const struct tevent_req_profile *(struct tevent_req *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_move_profile: struct tevent_req_profile *(struct tevent_req *, TALLOC_CTX *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_profile_append_sub: void (struct tevent_req_profile *, struct tevent_req_profile **) tevent_req_profile_create: struct tevent_req_profile *(TALLOC_CTX *) tevent_req_profile_get_name: void (const struct tevent_req_profile *, const char **) tevent_req_profile_get_start: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_status: void (const struct tevent_req_profile *, pid_t *, enum tevent_req_state *, uint64_t *) tevent_req_profile_get_stop: void (const struct tevent_req_profile *, const char **, struct timeval *) tevent_req_profile_get_subprofiles: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_next: const struct tevent_req_profile *(const struct tevent_req_profile *) tevent_req_profile_set_name: bool (struct tevent_req_profile *, const char *) tevent_req_profile_set_start: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_profile_set_status: void (struct tevent_req_profile *, pid_t, enum tevent_req_state, uint64_t) tevent_req_profile_set_stop: bool (struct tevent_req_profile *, const char *, struct timeval) tevent_req_received: void (struct tevent_req *) tevent_req_reset_endtime: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, 
tevent_req_cancel_fn) tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_req_set_profile: bool (struct tevent_req *) tevent_sa_info_queue_count: size_t (void) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *) tevent_signal_support: bool (struct tevent_context *) tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *) tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *) tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *) tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point) tevent_update_timer: void (struct tevent_timer *, struct timeval) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/ABI/tevent-0.9.9.sigs0000660000000000000000000001276100000000000016304 0ustar00rootroot00000000000000_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) _tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) _tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) _tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *) _tevent_loop_once: int (struct tevent_context *, const char *) _tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *) _tevent_loop_wait: int (struct tevent_context *, const char *) _tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *) _tevent_req_callback_data: void *(struct tevent_req *) _tevent_req_cancel: bool (struct tevent_req *, const char *) _tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *) _tevent_req_data: void *(struct tevent_req *) _tevent_req_done: void (struct tevent_req *, const char *) _tevent_req_error: bool (struct tevent_req *, uint64_t, const char *) _tevent_req_nomem: bool (const void *, struct tevent_req *, const char *) _tevent_req_notify_callback: void (struct tevent_req *, const char *) _tevent_schedule_immediate: void (struct tevent_immediate *, 
struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_backend_list: const char **(TALLOC_CTX *) tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *) tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *) tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *) tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *) tevent_common_check_signal: int (struct tevent_context *) tevent_common_context_destructor: int (struct tevent_context *) tevent_common_fd_destructor: int (struct tevent_fd *) tevent_common_fd_get_flags: uint16_t (struct tevent_fd *) tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_common_loop_immediate: bool (struct tevent_context *) tevent_common_loop_timer_delay: struct timeval (struct tevent_context *) tevent_common_loop_wait: int (struct tevent_context *, const char *) tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *) tevent_context_init: struct tevent_context *(TALLOC_CTX *) tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *) tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...) tevent_fd_get_flags: uint16_t (struct tevent_fd *) tevent_fd_set_auto_close: void (struct tevent_fd *) tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t) tevent_fd_set_flags: void (struct tevent_fd *, uint16_t) tevent_loop_allow_nesting: void (struct tevent_context *) tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *) tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *) tevent_queue_length: size_t (struct tevent_queue *) tevent_queue_start: void (struct tevent_queue *) tevent_queue_stop: void (struct tevent_queue *) tevent_re_initialise: int (struct tevent_context *) tevent_register_backend: bool (const char *, const struct tevent_ops *) tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *) tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *) tevent_req_is_in_progress: bool (struct tevent_req *) tevent_req_poll: bool (struct tevent_req *, struct tevent_context *) tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *) tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *) tevent_req_received: void (struct tevent_req *) tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *) tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn) tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval) tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn) tevent_set_abort_fn: void (void (*)(const char *)) tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *) tevent_set_debug_stderr: int (struct tevent_context *) tevent_set_default_backend: void (const char *) tevent_signal_support: bool (struct tevent_context *) 
tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t) tevent_timeval_compare: int (const struct timeval *, const struct timeval *) tevent_timeval_current: struct timeval (void) tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t) tevent_timeval_is_zero: bool (const struct timeval *) tevent_timeval_set: struct timeval (uint32_t, uint32_t) tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *) tevent_timeval_zero: struct timeval (void) tevent_wakeup_recv: bool (struct tevent_req *) tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)

tevent-0.11.0/Makefile:

# simple makefile wrapper to run waf

WAF_BIN=`PATH=buildtools/bin:../../buildtools/bin:$$PATH which waf`
WAF_BINARY=$(PYTHON) $(WAF_BIN)
WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY)

all:
	$(WAF) build

install:
	$(WAF) install

uninstall:
	$(WAF) uninstall

test:
	$(WAF) test $(TEST_OPTIONS)

dist:
	touch .tmplock
	WAFLOCK=.tmplock $(WAF) dist

distcheck:
	touch .tmplock
	WAFLOCK=.tmplock $(WAF) distcheck

clean:
	$(WAF) clean

distclean:
	$(WAF) distclean

reconfigure: configure
	$(WAF) reconfigure

show_waf_options:
	$(WAF) --help

# some compatibility make targets
everything: all

testsuite: all

check: test

# this should do an install as well, once install is finished
installcheck: test

etags:
	$(WAF) etags

ctags:
	$(WAF) ctags

tevent-0.11.0/bindings.py:

#!/usr/bin/python
#
# Python integration for tevent - tests
#
# Copyright (C) Jelmer Vernooij 2010
#
# ** NOTE! The following LGPL license applies to the tevent
# ** library. This does NOT imply that all of Samba is released
# ** under the LGPL
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import signal
from unittest import TestCase, TestProgram
import gc

import _tevent


class BackendListTests(TestCase):

    def test_backend_list(self):
        self.assertTrue(isinstance(_tevent.backend_list(), list))


class CreateContextTests(TestCase):

    def test_by_name(self):
        ctx = _tevent.Context(_tevent.backend_list()[0])
        self.assertTrue(ctx is not None)

    def test_no_name(self):
        ctx = _tevent.Context()
        self.assertTrue(ctx is not None)


class ContextTests(TestCase):

    def setUp(self):
        super(ContextTests, self).setUp()
        self.ctx = _tevent.Context()

    def test_signal_support(self):
        self.assertTrue(type(self.ctx.signal_support) is bool)

    def test_reinitialise(self):
        self.ctx.reinitialise()

    def test_loop_wait(self):
        self.ctx.loop_wait()

    def test_add_signal(self):
        sig = self.ctx.add_signal(signal.SIGINT, 0, lambda callback: None)
        self.assertTrue(isinstance(sig, _tevent.Signal))

    def test_timer(self):
        """Test a timer can be scheduled"""
        collecting_list = []
        # time "0" has already passed, callback will be scheduled immediately
        timer = self.ctx.add_timer(0, lambda t: collecting_list.append(True))
        self.assertTrue(timer.active)
        self.assertEqual(collecting_list, [])
        self.ctx.loop_once()
        self.assertFalse(timer.active)
        self.assertEqual(collecting_list, [True])

    def test_timer_deallocate_timer(self):
        """Test timer is scheduled even if reference to it isn't held"""
        collecting_list = []

        def callback(t):
            collecting_list.append(True)

        timer = self.ctx.add_timer(0, lambda t: collecting_list.append(True))
        gc.collect()
        self.assertEqual(collecting_list, [])
        self.ctx.loop_once()
        self.assertEqual(collecting_list, [True])

    def test_timer_deallocate_context(self):
        """Test timer is unscheduled when context is freed"""
        collecting_list = []

        def callback(t):
            collecting_list.append(True)

        timer = self.ctx.add_timer(0, lambda t: collecting_list.append(True))
        self.assertTrue(timer.active)
        del self.ctx
        gc.collect()
        self.assertEqual(collecting_list, [])
        self.assertFalse(timer.active)

    def test_timer_offset(self):
        """Test scheduling timer with an offset"""
        collecting_list = []
        self.ctx.add_timer_offset(0.2, lambda t: collecting_list.append(2))
        self.ctx.add_timer_offset(0.1, lambda t: collecting_list.append(1))
        self.assertEqual(collecting_list, [])
        self.ctx.loop_once()
        self.assertEqual(collecting_list, [1])
        self.ctx.loop_once()
        self.assertEqual(collecting_list, [1, 2])


if __name__ == '__main__':
    TestProgram()

tevent-0.11.0/configure:

#!/bin/sh

PREVPATH=`dirname $0`

if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
	WAF=../../buildtools/bin/waf
elif [ -f $PREVPATH/buildtools/bin/waf ]; then
	WAF=./buildtools/bin/waf
else
	echo "tevent: Unable to find waf"
	exit 1
fi

# using JOBS=1 gives maximum compatibility with
# systems like AIX which have broken threading in python
JOBS=1
export JOBS
cd . || exit 1
$PYTHON $WAF configure "$@" || exit 1
cd $PREVPATH

tevent-0.11.0/doc/img/tevent_context_stucture.png:

[binary PNG image data omitted — diagram of the tevent context structure]
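The ABI .sigs listings above only record symbol signatures. As a rough illustration of how the core calls fit together, the following standalone C sketch (not part of the tarball) creates a tevent context, schedules a one-second timer, and drives it with the loop functions whose signatures appear in the listings, much like the timer tests in bindings.py do from Python. It assumes libtevent and libtalloc development headers are available; the file name demo.c and the build command are illustrative only, e.g. cc demo.c $(pkg-config --cflags --libs tevent talloc).

/* demo.c — minimal sketch of the public tevent timer API (illustrative only). */
#include <stdio.h>
#include <stdbool.h>
#include <talloc.h>
#include <tevent.h>

/* Matches tevent_timer_handler_t: called once when the timer fires. */
static void timer_handler(struct tevent_context *ev,
                          struct tevent_timer *te,
                          struct timeval current_time,
                          void *private_data)
{
	bool *fired = (bool *)private_data;
	*fired = true;
	printf("timer fired\n");
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	/* tevent_context_init: struct tevent_context *(TALLOC_CTX *) */
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	bool fired = false;

	if (ev == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* tevent_add_timer() is the public wrapper around _tevent_add_timer()
	 * from the ABI list; tevent_timeval_current_ofs(1, 0) yields a struct
	 * timeval one second from now. */
	struct tevent_timer *te = tevent_add_timer(ev, mem_ctx,
						   tevent_timeval_current_ofs(1, 0),
						   timer_handler, &fired);
	if (te == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* Run single iterations of the event loop until the handler has run,
	 * mirroring ctx.loop_once() in the bindings.py tests. */
	while (!fired) {
		tevent_loop_once(ev);
	}

	talloc_free(mem_ctx);
	return 0;
}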
•…Š ÎY.U–ÕÔ¡êgûöížžž(¡””ùùIY´íbtP‡ŒI¢l‚s–K•e5u¨šÉÍÍk{O:U,Ä:PÅ.F§P67Ñ*Êf^g¹TMVS‡ª“²²² Í#=6ë@Uº]3 R-°\ª1«©CÕÉ’%KÜ^pâÄ ÊÐpµêb!Õ u¨ÚÈÈÈ666þá‡>|H[$„P‡¨CUÇãÇccc-,,  ½D!„P‡¨CUARR’££cHHHff&;Ž!Ô!êP•²oß>ooossóÍ›7«^;ŽB¨CÔ¡J¦°°°Y³f¦¦¦&Làw©„Bªj†jkk}ãÆ Ú!„P‡ª”5kÖ¸»»;::þñÇ2˹Buˆ:¤]²²²êׯobb‚'}ðàú'=ztöìÙœ§§*Ù¹s§.ýJ“ yP‡¨CåÑO÷îÝ---{ôè‘——WÞœquuå$=U‰ŽÛ!M‚æA¢•›9sæàé|}}Ïž=+½Æú95~üxÎÓ£mbccuvTš̓:D*©©©P SSÓõë×?~üX³œ=ztAA'éÑ6óçÏ×};¤IÐ<¨CÔ¡rPTTÔªU+33³‘#G2gt½Èmšsž:Ä.cÆŒ±··å•W®^½ªÙÔ 4}Ú!M‚9ObkȦM›<<Eßuˆ&¡¬|ËÊÊ]bª{TÌcÇŽI7JpK~~~»ví’Ѫ§æA¢ý/§OŸ011Y¾|yÅ׸S‘3ëׯ¯U«–ŒçÕ6ê_´ŠoOÞklÛ¶ÍÓÓó›o¾)..†‹Áÿ³fͺÿ¾ëMBµI@ÄÆÐÐÐÌÌLiÄv™£Q‡ô•Gµk×ÎÜÜ|РAùùùÚË™[·nEDD˜ššV¥ÓQÿ¢U{ò^cĈ¶¶¶;vìxúô)rƒ¸¸8éù, L‡hê„ÈØÃ@=ÍÍÍ¥Q‡ P‡>ýôS‡zõê]ºt©RÖ¸S–3]ºt±´´D=AÅvss»zõ*6&''GFFÚÛÛ·mÛ6--míÚµD¯wëÖ­Å›*xgÜ!¶¬Zµ ª)sʳgÏÐNœ2eJ``à¹sç’’’¼¼¼|||pVxQ…È™’’"ÒÄ)’ô/\¸0}útooo¤¿fÍÜÞûï¿ö²ŸŸŸ¸œHJÙÂM;v¬C‡¯½öZNN޼×hÙ²¥™™®‚‚@ËàÁ¯\¹R¹Ž¦´´tΜ9¾¾¾pm‰‰‰ÙÙÙ’V¶f»hÚ3 Qèééé±±±¸™J‹Ô!êðo¿ý†* ûÞ¾};ÚàZÍáX¥ßÌ㢸ú† à,Po¡…ðn¨ØÝºukРü¯ÐEœØ»wïãÇ—””ÈŸrýúõæÍ›Ã}#å©S§¢2ïÝ»Lj]òU†Ì‘Mš4i¢V7mÚTü¿`Á‚Û·o‹Þ3ø\~çàÁƒpCâr  Û%),Y²dË–-¸a<©¼×ÀÁÆÆÆ’-8gI¿ÆPæh¢££'Nœ¨æ—sçÎuqq1z.7xðà‚‚‚Šì’ æ32dî'$$$è\]]MLLh˜„DoCÉpÊ´iÓÄpVêuÈ øÎ;°l´õݽ{· rF¦žôë×ÏÚÚ ÷ œØµÿ~Ô[¸3áéà†~üñÇ{÷î);­K1R!¤KÝÜÜ\xp…uû¥½"’4•¥¯ìr ïM¤OÔD$!cuêÔ‘Þ‚,,,ÔÑ¡rMpÙ¨Q#ÜžÑ?XYYI.¡Ù.雑Ìgj$MB“ÖRø‰,[¶ìáÇÔ!êÞ0ª š´666­[·®Äq¨p_~ù¥š:T¿~}8,ÑØHî­U«V¶¶¶Ø…ÿÇŒ#J®ì™”¥jætT'¢â§šw¨pKãÆá­p®è‘„tÇ‘2G£l Q´Äå㡆 J+ ¢a‰¢h¶K:š9sæ;#Ö=n>àH)Œ‡h/5 iƒoݺÕÃÃÃÞÞ~×®]¾¾¾Ô!ê~ðòåËkÕª…GHMM­¬5îöìÙoõꫯ*›âW¦žˆæÿÎ;å»,Xçµxñâ… ®[·®¸¸Xõ)ºãtÔ¼C…[†W{äȸ*øYÿØØX™WÓ Š)DåsöìÙNNNEIHH¼rÐl—tãF¼Ù¥RÌ›7Oͦ MB…‰ùàééiffF¢éqgff¢n ¶hÑ"I}®Ož|½$ü¸jïµ»{÷ntt44õƒ>¨ ]BÏ7nŒgD¤¸páBéw4}5Qÿ-…ë© IP‡ D‡nÞ¼ùñÇ‹©m¡ø'!!!99¹  Ò¢lБŠ”ФóçÏOž<¹Q£FpÖP#77·qãÆ•ë+ôJgĈ_ägÏ,h‚Íž=[|&Ò¬Y³ÔÔT™•ŠhúÔ!êuˆ:TŽž%D*¾¾¾¢¶I“&ß~ûmaa¡:»úšôÛo¿õë×Ñ7ÔhÊ”)bˆmç”ÕÝÝÝÞÞþÀ¯qwõêÕ.]ºXYY!¨3fÌ;wäD—M?,,ÌF U<å3hР‚okkku®.îÜò\ˆ&aHæQ£uhÇŽ‘‘‘V @QQQ¿üòKqq±6äA¨QzzºDÂÃÃSRR*ku¸víZhh(²¹sçJ÷¡• dQ`` ñññÙ¸q£²÷^ºlú¥Ê©Þ›Ñø`êP5 ƒ1ªC(’Ñ£G;88@ @›7o~øð¡¶¡FˆE5j„`ÂÂÂâã?®ÄÙtTÛh¯^½Ä ”‘þ(R}ŠŠŠF…ƒfwëÖMõÄ\4}ºxšsž:¤Š'Nˆ0¡ÉÔ©Sóó󫲋 ×B!Ö¶‚ ¶oßA´¶ßÁ]\\¼½½OŸ>­ÁµcPk䘭­í—_~ùÒ%Ãiút44 æ|¸²fÖ‘çäÉ“þþþxäÕ«W—w9‚رø/""âСCêô%ÒôéhhÌyêb.\èææfdd4f̘¼¼¼ª) #E999qqq–––~~~; ‚âââ×^{ ¡ÌСCågéWÍÍ›7{õê…¨6lذÜÜ\5sŒ¦OGC“`ÎS‡‹«««™™ÙôéÓÅtñºÀÝ»wûôécmm­%)?~¼½½}xx¸ø¨[ý·lÙR·n]„kžžžëÖ­+×bá’ ®ˆC*—N:)›UV×¼!M‚æQ£uhÁ‚!Dßÿýƒtê‘=z4xð`[[[HQåvÐmß¾*+,ר}ºuëÖP/Žš ç‹ÐL&UˆÂu†tšÍ£Fë*€¿¿?³fÍzé@¯êí Cˆ­]»v7nܨà»+œ>`ÀhILLŒêé„%@ü/^ìåå…¼jРÁž={4ž}®ŒTº\oY:4š«C7oÞë^;¶°°PÇŸíúõë"‚?~|¿+Z²d‰Û Ä´ó/=>//¯_¿~¶¶¶¡Aƒ!ßôb BÑi‚ÿíÛ·/b‚öíÛÃÅëE‹`ïÞ½b2ì­[·j<ÛBFFFpp0å‡~Pg|Áo¿ýfffÝZ¾|¹nv]BˆþéÐܹs===;¦½¯s*½ûbòäÉŽŽŽ†Ë—/k ?޵°°0`€ôbÊþì³ÏÄbϯ½öÚ©S§ô%£!D×uèÚµk 4011A¿\cŽ«{÷>úè# zçÄL !!!™™™ªeìüùó111–––-¨Ñ_ýÅNdB©4zçwlllÔ‰ tßÿaœ‹‹ ”r½§Ù·oŸ···¹¹ùæÍ›UŒ2€Þ@ž}|| ÓP¬;wJ¯²L!¤¢:tôèÑÚµk;;;«ù–^×€NtïÞ‘J¹Fý6kÖÌÔÔt„ *Î*((4hXDµÿþ9$B*Y‡zôè'w\5³Yk5$:w:1tèP[[Ûèèè7n(;æÀfffNNNß}÷Æ @¢h#Ξ=›ÓT;;wîäEZÔ¡?þøÃÛÛÛÁÁ!--M[ú‰âââ,,,¦L™¢ÎW¨kÖ¬qwwwttÄã+kðôéÓéÓ§CØŒ¡Uˆµ7Ç+} ý‹2¾úê+±’/©^8Âvuè½÷ÞCX0|øpÝÿ`H5bùÔzõê]½zUõ‚¬¬¬úõ뛘˜ ’+œµtêÔI,¢:nÜ8m/uA_Cÿ¢Â6ÄDsãÇç4ÕBll,gœÓ®ݼy388ØÒÒòÈ‘#úøfH„5-[¶„rüúë¯*à0ñ2©Gyyyò¬[·. 
*åïï¿iÓ¦*h,Ó×п¨¶Ñ£GpŽƒjaþüùœ[»:´xñb´Äãââ4[uT×øüóχ ¦"¶›3g¬Ê××÷ìÙ³2ý÷îÝùöööFFF={öDTT5ÚL_Cÿ¢Ú6èY†¬C]»v“jƤ‡òòòrwwWöMkjj*ÈÔÔtýúõ2c¯=Ú¬Y3sssè,¯*çÖ£¡Ó¿Ð6X5T‡233á”mll222 ã“ÌgÏž½òÊ+&&& »æ -­Zµ2339r¤tÀ„ çË/¿¬U«–±±qddä‘#GªxÙY:ý mƒEPCuhÙ²ennníÛ·7ŒN9Á|`gg§pn…1cÆ ÖPIdÈÎÎîÞ½»µµ5‚¤Q£Fݾ}»ê%™†NÿRCl£OŸ>¯¾úêÍ›7+1M´§'Mš€zŸuêÔ1ý‡¬¬,õÓQv"«§vuèã?vppøä“Otg¹ÕŠ#FÍõêÕ Š"½}Ó¦MþöìÙ# w6…„„Àìj×®«kB#ú­ú½ö/†g-[¶¬\ÛhÞ¼¹™™™‘‘ÑåË—ñ388ÿ_¼x±´´4%%U厶é–-[¤ûH¤­BFé ‚‚‚$IQ‡ªB‡:vìhaa±nÝ:™¥&//FP®&Œ<çÎóññÃB #Ù»711™9s¦xñóàÁ„GaØ\çÎ/\¸PÃék*âk$Ç&&&º¸¸´jÕ*==]²K¯ý‹Š›¬”úRÅ•ôù *÷–ž={&]¸Â6ÄÿeeeÈ71‡$š¡ÇŽ“¶ <…ŸŸß®]»¤ ôéÔ!­ëÚþõë×766>yò¤Ž|¾º~ýúZµjI[€fÔ­[>KòY®d=‹ØØØ[·na ŽŒðhÖ¬YÕÒ×TÄ×HˆŽŽ†8%$$4mÚTº!¢¿þEÅMVV}©–JZ¹H®|ACÄÆÐÐPÖÒÆ‰í2ÏBªRÊÈÈ@Ü€ õÚµkºp£P±¨]ÅM¼C‡æææ’0|Ñ¢E®®®h A™àÎ.\èéé F„´ÿ~W-¢¯Ñ)_³mÛ6ë7ß|S\\Œ.þG Cz&Ó¡J¬/ÕRI«Ò6ÄÆ˜˜[[ÛvíÚI¿§U³:tÈÛÛû_ÿú—^Jrrrdd¤½½}Û¶máÐ×®]‹ª.ºV[·n-1b„ƒƒ¶¬ZµJõçŸûöíƒY 5˜õÁƒKJJºtébii ÀénnnV$]·‹/¼j¾xLLLD #\ÒéÓ§ÅG©Ë—/ǹ½{÷Æ.ˆÐ!CpÏ:2J¾¦â¾¶/³cÇ4,(ô¸¸8éï” L‡dê‹xm¦A%•9 5D S¦L 00¯X±BýùFÅ£ÁùâDx1ü…»üí·ßTL~Z.P]?~üàÁƒ»wïÞ¹sµ¾žÞÿìÙ³éééDŽ9ràÀ={öà1‘lÚ´ ÉÏ?ÿŒø‘Á?üðí·ß~ýõ×ÿó?ÿóÅ_@2•M¸I_£¦¯ÂÁ×ã1EAçàÐ¥;UÄCü±ü,¨(5y“kذ¡´¢àö$Š¢Ù.ùxѼômÄÅÅ©iPI•R)¶ 2nÜ8DÃ0¿Aƒ‰’ôéb8Ìœ9sP7ÑÜD)ˆoÏU\ýÌ™3-[¶DÌ -A¬¬‰Ê"2ÄÓÓÆIÒÓÐÁéÀá¢];Œ²ÃOHÜ ¼†|“M!bÙ=üÍÍͽ~ýzTT”Â~91Þá3ÒG³ú¥«9È<œ²¨ü8wðàÁÇ÷Ýwßyçöïß¿wïÞ={öÄsuêÔ wÞ¦MøqÜI“&MÐzÅ).\Í%«›››““ÄmÃpM•c¢¢Í«ŽÑ×(»C”/ HL ö÷÷•¥Ð¿¨˜^VþygÏž K(Š$Ôx— Ê&cUXOežHƒJªì”J± Ѥ8qâ*j臗>¹?üí$´`E’ŽWG=EÜ µFKEÙ]½T‡D>£º‰E–©Czó~hãÆîîî"¼€áöéÓq€d/ Pÿ{X1µRƒI!Ù·ÞzK¤&º/à1ÑúƒùB*Ð&EšpL>>>;v¼té’:o‰Äû!ܧðï¢S¾\©™`ee#ÆUàd½¼¼üüüЇt………EDD >GGG·nݺmÛ¶:t€ÔuëÖ ²‡'‚¾ýöÛ¨™Ã† 9r$Ô@x$¨ŽÑ×(»Ca¢‹-Òê¸mDðH‚Šò‚ÙK/¯Ù®ŠôÙJ×”¬•Tá)÷î݃!!¯Ä—…2?e.ªºEÌ]róæM4ò`~2IM:5##Í#4$¢úê¨_¢+åðá₆"Eécä Žº{÷né˜'OžÄÇÇ£D¨CÕ©Cÿýïa‚ÿþ÷¿Õ/‡æÃÒ¥KCBB¬­­;wî ë‘~ƒ k0`€šã¿EjPÁ€€¢¢óçÏ‹Ô6oÞŒ‘‘‘©©©¢;ƒ-zQ[Ô·-ÆËI^‡@&L˜€p±Â?ü°bÅŠÕ«WÿüóÏ6lÀ!uhªïÝ»÷àÁƒð}p©§N:wî\ff&¼*Â5´©!ÕhÅ£íÛ…s,UÎsP±}š¾éCï¡ap:{´d>Œ«,ÿë•”u¥ìÒX‡¤ë ÌRƒJªðQÝlÕMæ§ÌEUÜ92•.** ÿüòË/°I™¤BCC¥û`¥å¡Ö*®¾råJ ÁãtïÞ)Ã6|}}¥‘ïB®@¼"•v ¨×h·iÉNˆZ:täÈ”¨Â÷ŸT¿òÖ.…©‰Ø"Ù+*’´£W'ñ!C†ØØØ :TÒâââòÍ7ß u¬æÊ4º3N¾F_#ÈËËCĉBG ŠÆ„ŒµÞ| ÒõEãJ*ŠLu“ù)Q¯QÅEÅ?òI¡Á„ƨèîFãÍGÐK¯.“²Ì1òÝû*Z™2NªR}èÈt1XÖóæÁ™ŠAÃb´xýõ×e7}aøéNBewnx:¤ïìÞ½û×_EëPë7Z¶lYñé]ÊÕ«¬ÎéÔ!-êÐß/ÆÔš››Ÿ9sFGæ—«,Äs¥¥¥ MjÞ¼¹›››ø0ó»ï¾ÓµEÿèkªÀ×P‡t „Ñ“'OFƒþçÁƒ3gÎTsÀ-uÈ t¨{÷îh¥*\2®‚„……Ù(AÛSvŠÅý$óæ!B{ÁдiÓºuë&b#üS½lÓ×P‡ À6*XÍE¿««««]‹-~ùå—JYx¥Aƒpk¨éåõ6âq,_Àõ‡ªN‡Ægoo¯pɸ ¢âM¾¶xË–-:tŒÙýñÇaë...‡^¶l™¿¿¿xQñÃ?èH`D_£U_£×þE—o²‚Õ\Òïªf'°wU)'R‡´«C›7o†ËîÒ¥‹ôtúŽ,'=OL|РAðDbq6ÄIo¼ñ†Œzöì©æXpúýõ5zí_èY®CðÔÁÁÁŽŽŽðÅ:2ótśʌ]¾s玘ßpâĉþàùøø@м¼¼–/_^]+±ÒÐ™í´ ÚIÖ¡¿ÿyEôóÏ?ëÈ’¬áNPPƒƒƒ˜ÞFz×îÝ»===ñ°b]"ì½råJ=¬¬¬ F½{÷ÎÊʪ®Àˆ†Îl§m°j®}ùå—Èb5¿fÕ}æÍ›‡Ç‰‹‹“žÐEáË#«üýbRÎ%K–Ô®]Räíí½zõjõg±£¡Ó¿ð&i'¤tèòåËbY”³gÏÀèm1‡ÍÒ¥Kö³!æƒDYXXôíÛW2 éÒ¥K8QFýúõ»zõjg þEõMFEEMœ81‰Tbé VO-êèÕ«\ðçŸ.†Ò_ÒÒÒ|||jÕª% wäÁ®蘈L²½¤¤äÛo¿õôôÄ.óºuëª20¢¯¡QmƤZQ6 1©4JNNvww¬¢¦§ˆõ[ß}÷]e‹Œ V¬Xáææ†º}äÈé)=ðìP©Î;#¢255}ë­·²³³«&Cèkè_”!¿©®B*M‡?~,ÖT†7|ðàž>çÍ›7ƒƒƒØ:tHõ”h–ÄÄD<µüòzŒ¾þúkFÐfÉbÕô5ô/ÕBÑ('ZÔ!°víÚZµjéuH$Æ ÄÇÇ«3à"??_,_=~üxùox‘;vÑ Aƒnܸ¡Õla §!¤¦ëˆöíÛ[XXL™2E,&­_\»v Átå÷ß—YãUûöíóööÆ#oÚ´Iá´FOŸ>7ož»»;£   ÿþ÷¿†1´BtQ‡ÀÁƒ}}}]]]?®ƒ3R«æí·ß¶±±0`€ôbÿüs„PИóçÏ+›¦úÌ™3íÚµsE2$''‡ÍgBÑŠÁ½>ÜÎήM›6Ú\¶nÝêéééì윚šZ.E$F%ÄÇÇ«06wî\777FuêÔŸÁÒ°!¤’uܾ};**ÊÜÜ\zçòóó###MMMçÍ›'½´š\¼xÒùæ›oTÌy UNOO‡B[XX Þ{ï½ÜÜ\F„RÉ:öîÝëããcmm½mÛ¶§OŸêþãõíÛw«p5ùé§Ÿë8::>|XÅ@;¨ÎãÇgΜéêê ݪW¯ÞöíÛBH%ë¼íŒ3œœœBBBþüóOQ´`ÁÑ#§z¬¶êG:t¨­­-bÁ7n¼ô`dKëÖ­-^0räȼ¼<F„Ri:ô÷‹hzöìiee™••¥³NvýúõÞÞÞfffŸ-»   Y³fHjìØ±/ý¤òèÑ£iÓ¦Aÿ…††¦¤¤èEìH!ú¡CàîÝ»íÚµC{ ]XžGž?þø#((ÈØØø³Ï>«”Eü<U377߸q£:½mP£cÇŽµlÙ§ £Þÿý;wîh¬ÙG={6'Ú1`vîÜ©k_ËÒêh:­C ;;[ô>µiÓæêÕ«:8p@ .>|¸ê)|4è ÈÈÈPGzqJqqñäÉ“q±aÆ»wïÖ,0úꫯ\]]9ËŽ£ƒ³Ñêhº®CÒRÔ¼yó³gÏêÈ»¢;vˆ‰J+Q„ƒºtébiiÙ«W/õWÁ€>|XŒ3Ĺ~øa~~~ye[2Ïéøñã9ÑŽ«›³©Òêhz CBŠÄ`åúõëëÂ[U«VyyyAÛGŽY¹"$¸|ùrݺu!råšjªƒƒ?ùäGGGÜ[DDľ}ûÊ•WÂ#Œ=ÅYv Œùóçëæê´:Z 
[binary data omitted: embedded PNG image files for the documentation figures, not representable as text]
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/mainpage.dox0000660000000000000000000000326100000000000016077 0ustar00rootroot00000000000000/** * @mainpage * * Tevent is an event system based on the talloc memory management library. It * is the core event system used in Samba. * * The low level tevent has support for many event types, including timers, * signals, and the classic file descriptor events. * * Tevent also provides helpers to deal with asynchronous code, providing the * tevent_req (tevent request) functions. * * @section main_tevent_tutorial Tutorial * * You should start by reading @subpage tevent_tutorial, then reading the * documentation of the interesting functions as you go. * * @section main_tevent_download Download * * You can download the latest releases of tevent from the * tevent directory * on the samba public source archive. * * @section main_tevent_bugs Discussion and bug reports * * tevent does not currently have its own mailing list or bug tracking system. * For now, please use the * samba-technical * mailing list, and the * Samba bugzilla * bug tracking system. * * @section main_tevent_devel Development * You can download the latest code either via git or rsync.
* * To fetch via git see the following guide: * * Using Git for Samba Development * * Once you have cloned the tree switch to the master branch and cd into the * lib/tevent directory. * * To fetch via rsync use this command: * * rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tevent . * */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_context.dox0000660000000000000000000000600300000000000017364 0ustar00rootroot00000000000000/** @page tevent_context Chapter 1: Tevent context @section context Tevent context Tevent context is an essential logical unit of tevent library. For working with events at least one such context has to be created - allocated, initialized. Then, events which are meant to be caught and handled have to be registered within this specific context. Reason for subordinating events to a tevent context structure rises from the fact that several context can be created and each of them is processed at different time. So, there can be 1 context containing just file descriptor events, another one taking care of signal and time events and the third one which keeps information about the rest. Tevent loops are the part of the library which represents the mechanism where noticing events and triggering handlers actually happens. They accept just one argument - tevent context structure. Therefore if theoretically an infinity loop (tevent_loop_wait) was called, only those arguments which belong to the passed tevent context structure can be caught and invoked within this call. Although some more signal events were registered (but within some other context) they will not be noticed. @subsection Example First lines which handle mem_ctx belong to talloc library knowledge but because of the fact that tevent uses the talloc library for its mechanisms it is necessary to understand a bit talloc as well. For more information about working with talloc, please visit talloc website where tutorial and documentation are located. Tevent context structure *event_ctx represents the unit which will further contain information about registered events. It is created via calling tevent_context_init(). @code TALLOC_CTX *mem_ctx = talloc_new(NULL); if (mem_ctx == NULL) { // error handling } struct tevent_context *ev_ctx = tevent_context_init(mem_ctx); if(ev_ctx == NULL) { // error handling } @endcode Tevent context has a structure containing lots of information. It include lists of all events which are divided according their type and are in order showing the sequence as they came. @image html tevent_context_stucture.png In addition to the lists shown in the diagram, the tevent context also contains many other data (e.g. information about the available system mechanism for triggering callbacks). @section tevent_loops Tevent loops Tevent loops are the dispatcher for events. They catch them and trigger the handlers. In the case of longer processes, the program spends most of its time at this point waiting for events, invoking handlers and waiting for another event again. There are 2 types of loop available for use in tevent library:
- int tevent_loop_wait()
- int tevent_loop_once()
Both of functions accept just one parameter (tevent context) and the only difference lies in the fact that the first loop can theoretically last for ever but the second one will wait just for a single one event to catch and then the loop breaks and the program continue. */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_data.dox0000660000000000000000000000707300000000000016621 0ustar00rootroot00000000000000/** @page tevent_data Chapter 3: Accessing data @section data Accessing data with tevent A tevent request is (usually) created together with a structure for storing the data necessary for an asynchronous computation. For these private data, tevent library uses void (generic) pointers, therefore any data type can be very simply pointed at. However, this attitude requires clear and guaranteed knowledge of the data type that will be handled, in advance. Private data can be of 2 types: connected with a request itself or given as an individual argument to a callback. It is necessary to differentiate these types, because there is a slightly different method of data access for each. There are two possibilities how to access data that is given as an argument directly to a callback. The difference lies in the pointer that is returned. In one case it is the data type specified in the function’s argument, in another void* is returned. @code void tevent_req_callback_data (struct tevent_req *req, #type) void tevent_req_callback_data_void (struct tevent_req *req) @endcode To obtain data that are strictly bound to a request, this function is the only direct procedure. @code void *tevent_req_data (struct tevent_req *req, #type) @endcode Example with both calls which differs between private data within tevent request and data handed over as an argument. @code #include #include #include struct foo_state { int x; }; struct testA { int y; }; static void foo_done(struct tevent_req *req) { // a->x contains 10 since it came from foo_send struct foo_state *a = tevent_req_data(req, struct foo_state); // b->y contains 9 since it came from run struct testA *b = tevent_req_callback_data(req, struct testA); // c->y contains 9 since it came from run we just used a different way // of getting it. 
struct testA *c = (struct testA *)tevent_req_callback_data_void(req); printf("a->x: %d\n", a->x); printf("b->y: %d\n", b->y); printf("c->y: %d\n", c->y); } struct tevent_req * foo_send(TALLOC_CTX *mem_ctx, struct tevent_context *event_ctx) { printf("_send\n"); struct tevent_req *req; struct foo_state *state; req = tevent_req_create(event_ctx, &state, struct foo_state); state->x = 10; return req; } static void run(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { struct tevent_req *req; struct testA *tmp = talloc(ev, struct testA); // Note that we did not use the private data passed in tmp->y = 9; req = foo_send(ev, ev); tevent_req_set_callback(req, foo_done, tmp); tevent_req_done(req); } int main (int argc, char **argv) { struct tevent_context *event_ctx; struct testA *data; TALLOC_CTX *mem_ctx; struct tevent_timer *time_event; mem_ctx = talloc_new(NULL); //parent if (mem_ctx == NULL) return EXIT_FAILURE; event_ctx = tevent_context_init(mem_ctx); if (event_ctx == NULL) return EXIT_FAILURE; data = talloc(mem_ctx, struct testA); data->y = 11; time_event = tevent_add_timer(event_ctx, mem_ctx, tevent_timeval_current(), run, data); if (time_event == NULL) { fprintf(stderr, " FAILED\n"); return EXIT_FAILURE; } tevent_loop_once(event_ctx); talloc_free(mem_ctx); printf("Quit\n"); return EXIT_SUCCESS; } @endcode Output of this example is: @code a->x: 10 b->y: 9 c->y: 9 @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_events.dox0000660000000000000000000002565000000000000017215 0ustar00rootroot00000000000000/** @page tevent_events Chapter 2: Tevent events @section pools Tevent events Ok, after reading previous chapter we can start doing something useful. So, the way of creating events is similar for all types - signals, file descriptors, time or immediate events. At the beginning it is good to know about some typedefs which are set in tevent library and which specify the arguments for each callback. These callbacks are: - tevent_timer_handler_t() - tevent_immediate_handler_t() - tevent_signal_handler_t() - tevent_fd_handler_t() According their names it is obvious that for creating callback for e.g. time event, tevent_timer_handler_t will be used. The best way how to introduce registering an event and setting up a callback would be example, so examples describing all the types of events follow. @subsection Time Time event This example shows how to set up an event which will be repeated for a minute with interval of 2 seconds (will be triggered 30 times). After exceeding this limit, the event loop will finish and all the memory resources will be freed. 
This is just example describing repeated activity, nothing usefull is done within foo function @code #include #include #include #include struct state { struct timeval endtime; int counter; TALLOC_CTX *ctx; }; static void callback(struct tevent_context *ev, struct tevent_timer *tim, struct timeval current_time, void *private_data) { struct state *data = talloc_get_type_abort(private_data, struct state); struct tevent_timer *time_event; struct timeval schedule; printf("Data value: %d\n", data->counter); data->counter += 1; // increase counter // if time has not reached its limit, set another event if (tevent_timeval_compare(¤t_time, &(data->endtime)) < 0) { // do something // set repeat with delay 2 seconds schedule = tevent_timeval_current_ofs(2, 0); time_event = tevent_add_timer(ev, data->ctx, schedule, callback, data); if (time_event == NULL) { // error ... fprintf(stderr, "MEMORY PROBLEM\n"); return; } } else { // time limit exceeded } } int main(void) { struct tevent_context *event_ctx; TALLOC_CTX *mem_ctx; struct tevent_timer *time_event; struct timeval schedule; mem_ctx = talloc_new(NULL); // parent event_ctx = tevent_context_init(mem_ctx); struct state *data = talloc(mem_ctx, struct state); schedule = tevent_timeval_current_ofs(2, 0); // +2 second time value data->endtime = tevent_timeval_add(&schedule, 60, 0); // one minute time limit data->ctx = mem_ctx; data->counter = 0; // add time event time_event = tevent_add_timer(event_ctx, mem_ctx, schedule, callback, data); if (time_event == NULL) { fprintf(stderr, "FAILED\n"); return EXIT_FAILURE; } tevent_loop_wait(event_ctx); talloc_free(mem_ctx); return EXIT_SUCCESS; } @endcode Variable counter is only used for counting the number of triggered functions. List of all available functions which tevent offers for working with time are listed here together with their description. More detailed view at these functions is unnecessary because their purpose and usage is quite simple and clear. @subsection Immediate Immediate event These events are, as their name indicates, activated and performed immediately. It means that this kind of events have priority over others (except signal events). So if there is a bulk of events registered and after that a tevent loop is launched, then all the immediate events will be triggered before the other events. Except other immediate events (and signal events) because they are also processed sequentially - according the order they were scheduled. Signals have the highest priority and therefore they are processed preferentially. Therefore the expression immediate may not correspond exactly to the dictionary definition of "something without delay" but rather "as soon as possible" after all preceding immediate events. For creating an immediate event there is a small different which lies in the fact that the creation of such event is done in 2 steps. One represents the creation (memory allocation), the second one represents registering as the event within some tevent context. @code struct tevent_immediate *run(TALLOC_CTX* mem_ctx, struct tevent_context event_ctx, void * data) { struct tevent_immediate *im; im = tevent_create_immediate(mem_ctx); if (im == NULL) { return NULL; } tevent_schedule_immediate(im, event_ctx, foo, data); return im; } @endcode Example which may be compiled and run representing the creation of immediate event. 
@code #include #include #include struct info_struct { int counter; }; static void foo(struct tevent_context *ev, struct tevent_immediate *im, void *private_data) { struct info_struct *data = talloc_get_type_abort(private_data, struct info_struct); printf("Data value: %d\n", data->counter); } int main (void) { struct tevent_context *event_ctx; TALLOC_CTX *mem_ctx; struct tevent_immediate *im; printf("INIT\n"); mem_ctx = talloc_new(NULL); event_ctx = tevent_context_init(mem_ctx); struct info_struct *data = talloc(mem_ctx, struct info_struct); // setting up private data data->counter = 1; // first immediate event im = tevent_create_immediate(mem_ctx); if (im == NULL) { fprintf(stderr, "FAILED\n"); return EXIT_FAILURE; } tevent_schedule_immediate(im, event_ctx, foo, data); tevent_loop_wait(event_ctx); talloc_free(mem_ctx); return 0; } @endcode @subsection Signal Signal event This is an alternative to standard C library functions signal() or sigaction(). The main difference that distinguishes these ways of treating signals is their setting up of handlers for different time intervals of the running program. While standard C library methods for dealing with signals offer sufficient tools for most cases, they are inadequate for handling signals within the tevent loop. It could be necessary to finish certain tevent requests within the tevent loop without interruption. If a signal was sent to a program at a moment when the tevent loop is in progress, a standard signal handler would not return processing to the application at the very same place and it would quit the tevent loop for ever. In such cases, tevent signal handlers offer the possibility of dealing with these signals by masking them from the rest of application and not quitting the loop, so the other events can still be processed. Tevent offers also a control function, which enables us to verify whether it is possible to handle signals via tevent, is defined within tevent library and it returns a boolean value revealing the result of the verification. @code bool tevent_signal_support (struct tevent_context *ev) @endcode Checking for signal support is not necessary, but if it is not guaranteed, this is a good and easy control to prevent unexpected behaviour or failure of the program occurring. Such a test of course does not have to be run every single time you wish to create a signal handler, but simply at the beginning - during the initialization procedures of the program. Afterthat, simply adapt to each situation that arises. 
@code #include #include #include static void handler(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { // Do something usefull printf("handling signal...\n"); exit(EXIT_SUCCESS); } int main (void) { struct tevent_context *event_ctx; TALLOC_CTX *mem_ctx; struct tevent_signal *sig; mem_ctx = talloc_new(NULL); //parent if (mem_ctx == NULL) { fprintf(stderr, "FAILED\n"); return EXIT_FAILURE; } event_ctx = tevent_context_init(mem_ctx); if (event_ctx == NULL) { fprintf(stderr, "FAILED\n"); return EXIT_FAILURE; } if (tevent_signal_support(event_ctx)) { // create signal event sig = tevent_add_signal(event_ctx, mem_ctx, SIGINT, 0, handler, NULL); if (sig == NULL) { fprintf(stderr, "FAILED\n"); return EXIT_FAILURE; } tevent_loop_wait(event_ctx); } talloc_free(mem_ctx); return EXIT_SUCCESS; } @endcode @subsection File File descriptor event Support of events on file descriptors is mainly useful for socket communication but it certainly works flawlessly with standard streams (stdin, stdout, stderr) as well. Working asynchronously with file descriptors enables switching within processing I/O operations. This ability may rise with a greater number of I/O operations and such overlapping leads to enhancement of the throughput. There are several other functions included in tevent API related to handling file descriptors (there are too many functions defined within tevent therefore just some of them are fully described within this thesis. The declaration of the rest can be easily found on the library’s website or directly from the source code):
- tevent_fd_set_close_fn() - registers an additional function to be called at the moment when the tevent fd structure is freed.
- tevent_fd_set_auto_close() - calling this function can simplify the maintenance of file descriptors, because it instructs tevent to close the appropriate file descriptor when the tevent fd structure is about to be freed.
- tevent_fd_get_flags() - returns flags which are set on the file descriptor connected with this tevent fd structure.
- tevent_fd_set_flags() - sets the specified flags on the event's file descriptor (a short usage sketch of these helpers follows this list).
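As a brief, hedged illustration (not part of the original text) of the last three helpers, the snippet below assumes that fde was obtained earlier from tevent_add_fd(); a fuller file descriptor example follows.

@code
uint16_t flags = tevent_fd_get_flags(fde);          // query the current flags

// additionally wait for writability, keeping whatever was set before
tevent_fd_set_flags(fde, flags | TEVENT_FD_WRITE);

// let tevent close the underlying file descriptor when fde is freed
tevent_fd_set_auto_close(fde);
@endcode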
@code static void close_fd(struct tevent_context *ev, struct tevent_fd *fd_event, int fd, void *private_data) { // processing when fd_event is freed } struct static void handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { // handling event; reading from a file descriptor tevent_fd_set_close_fn (fd_event, close_fd); } int run(TALLOC_CTX *mem_ctx, struct tevent_context *event_ctx, int fd, uint16_t flags, char *buffer) { struct tevent_fd* fd_event = NULL; if (flags & TEVENT_FD_READ) { fd_event = tevent_add_fd(event_ctx, mem_ctx, fd, flags, handler, buffer); } if (fd_event == NULL) { // error handling } return tevent_loop_once(); } @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_queue.dox0000660000000000000000000002277600000000000017043 0ustar00rootroot00000000000000/** @page tevent_queue Chapter 5: Tevent queue @section queue Tevent queue There is a possibility that the dispatcher and its handlers may not be able to handle all the incoming events as quickly as they arrive. One way to deal with this situation is to buffer the received events by introducing an event queue into the events stream, between the events generator and the dispatcher. Events are added to the queue as they arrive, and the dispatcher pops them off the beginning of the queue as fast as possible. In tevent library it is similar, but the queue is not automatically set for any event. The queue has to be created on purpose, and events which should follow the order of the FIFO queue have to be explicitly pinpointed. Creating such a queue is crucial in situations when sequential processing is absolutely essential for the successful completion of a task, e.g. for a large quantity of data that are about to be written from a buffer into a socket. The tevent library has its own queue structure that is ready to use after it has been initialized and started up once. @subsection cr_queue Creation of Queues The first and most important step is the creation of the tevent queue (represented by struct tevent_queue), which will then be in running mode. @code struct tevent_queue* tevent_queue_create (TALLOC_CTX *mem_ctx, const char *name) @endcode When the program returns from this function, the allocated memory, set destructor and labeled queue as running has been done and the structure is ready to be filled with entries. Stopping and starting queues on the run. If you need to stop a queue from processing its entries, and then turn it on again, a couple of functions which serve this purpose are: - bool tevent_queue_stop() - bool tevent_queue_start() These functions actually only provide for the simple setting of a variable, which indicates that the queue has been stopped/started. Returned value indicates result. @subsection add_queue Adding Requests to a Queue Tevent in fact offers 3 possible ways of inserting a request into a queue. There are no vast differences between them, but still there might be situations where one of them is more suitable and desired than another. @code bool tevent_queue_add(struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data) @endcode This call is the simplest of all three. It offers only boolean verification of whether the operation of adding the request into a queue was successful or not. No additional deletion of an item from the queue is possible, i.e. 
it is only possible to deallocate the whole tevent request, which would cause triggering of destructor handling and also dropping the request from the queue. Extended Options Both of the following functions have a feature in common - they return tevent queue entry structure representing the item in a queue. There is no further possible handling with this structure except the use of the structure’s pointer for its deallocation (which leads also its removal from the queue). The difference lies in the possibility that with the following functions it is possible to remove the tevent request from a queue without its deallocation. The previous function can only deallocate the tevent request as it was from memory, and thereby logically cause its removal from the queue as well. There is no other utilization of this structure via API at this stage of tevent library. The possibility of easier debugging while developing with tevent could be considered to be an advantage of this returned pointer. @code struct tevent_queue_entry *tevent_queue_add_entry(struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data) @endcode The feature that allows for the optimized addition of entries to a queue is that a check for an empty queue with no items is first of all carried out. If it is found that the queue is empty, then the request for inserting the entry into a queue will be omitted and directly triggered. @code struct tevent_queue_entry *tevent_queue_add_optimize_empty(struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data) @endcode When calling any of the functions serving for inserting an item into a queue, it is possible to leave out the fourth argument (trigger) and instead of a function pass a NULL pointer. This usage sets so-called blocking entries. These entries, since they do not have any trigger operation to be activated, just sit in their position until they are labeled as a done by another function. Their purpose is to block other items in the queue from being triggered. @subsection example_q Example of tevent queue @code #include #include #include struct foo_state { int local_var; int x; }; struct juststruct { TALLOC_CTX * ctx; struct tevent_context *ev; int y; }; int created = 0; static void timer_handler(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { // time event which after all sets request as done. Following item from // the queue may be invoked. struct tevent_req *req = private_data; struct foo_state *stateX = tevent_req_data(req, struct foo_state); // processing some stuff printf("time_handler\n"); tevent_req_done(req); talloc_free(req); printf("Request #%d set as done.\n", stateX->x); } static void trigger(struct tevent_req *req, void *private_data) { struct juststruct *priv = tevent_req_callback_data (req, struct juststruct); struct foo_state *in = tevent_req_data(req, struct foo_state); struct timeval schedule; struct tevent_timer *tim; schedule = tevent_timeval_current_ofs(1, 0); printf("Processing request #%d\n", in->x); if (in->x % 3 == 0) { // just example; third request does not contain // any further operation and will be finished right // away. 
tim = NULL; } else { tim = tevent_add_timer(priv->ev, req, schedule, timer_handler, req); } if (tim == NULL) { tevent_req_done(req); talloc_free(req); printf("Request #%d set as done.\n", in->x); } } struct tevent_req *foo_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, const char *name, int num) { struct tevent_req *req; struct foo_state *state; struct foo_state *in; struct tevent_timer *tim; printf("foo_send\n"); req = tevent_req_create(mem_ctx, &state, struct foo_state); if (req == NULL) { // check for appropriate allocation tevent_req_error(req, 1); return NULL; } // exemplary filling of variables state->local_var = 1; state->x = num; return req; } static void foo_done(struct tevent_req *req) { enum tevent_req_state state; uint64_t err; if (tevent_req_is_error(req, &state, &err)) { printf("ERROR WAS SET %d\n", state); return; } else { // processing some stuff printf("Callback is done...\n"); } } int main (int argc, char **argv) { TALLOC_CTX *mem_ctx; struct tevent_req* req[6]; struct tevent_req* tmp; struct tevent_context *ev; struct tevent_queue *fronta = NULL; struct juststruct *data; int ret; int i = 0; const char * const names[] = { "first", "second", "third", "fourth", "fifth" }; printf("INIT\n"); mem_ctx = talloc_new(NULL); //parent talloc_parent(mem_ctx); ev = tevent_context_init(mem_ctx); if (ev == NULL) { fprintf(stderr, "MEMORY ERROR\n"); return EXIT_FAILURE; } // setting up queue fronta = tevent_queue_create(mem_ctx, "test_queue"); tevent_queue_stop(fronta); tevent_queue_start(fronta); if (tevent_queue_running(fronta)) { printf ("Queue is runnning (length: %d)\n", tevent_queue_length(fronta)); } else { printf ("Queue is not runnning\n"); } data = talloc(ev, struct juststruct); data->ctx = mem_ctx; data->ev = ev; // create 4 requests for (i = 1; i < 5; i++) { req[i] = foo_send(mem_ctx, ev, names[i], i); tmp = req[i]; if (req[i] == NULL) { fprintf(stderr, "Request error! %d \n", ret); break; } tevent_req_set_callback(req[i], foo_done, data); created++; } // add item to a queue tevent_queue_add(fronta, ev, req[1], trigger, data); tevent_queue_add(fronta, ev, req[2], trigger, data); tevent_queue_add(fronta, ev, req[3], trigger, data); tevent_queue_add(fronta, ev, req[4], trigger, data); printf("Queue length: %d\n", tevent_queue_length(fronta)); while(tevent_queue_length(fronta) > 0) { tevent_loop_once(ev); printf("Queue: %d items left\n", tevent_queue_length(fronta)); } talloc_free(mem_ctx); printf("FINISH\n"); return EXIT_SUCCESS; } @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_request.dox0000660000000000000000000002107100000000000017372 0ustar00rootroot00000000000000/** @page tevent_request Chapter 4: Tevent request @section request Tevent request A specific feature of the library is the tevent request API that provides for asynchronous computation and allows much more interconnected working and cooperation among functions and events. When working with tevent request it is possible to nest one event under another and handle them bit by bit. This enables the creation of sequences of steps, and provides an opportunity to prepare for all problems which may unexpectedly happen within the different phases. One way or another, subrequests split bigger tasks into smaller ones which allow a clearer view of each task as a whole. 
@subsection name Naming conventions There is a naming convention which is not obligatory but it is followed in this tutorial: - Functions triggered before the event happens. These establish a request. - \b foo_send(...) - this function is called first and it includes the creation of tevent request - tevent req structure. It does not block anything, it simply creates a request, sets a callback (foo done) and lets the program continue - Functions as a result of event. - \b foo_done(...) - this function contains code providing for handling itself and based upon its results, the request is set either as a done or, if an error occurs, the request is set as a failure. - \b foo_recv(...) - this function contains code which should, if demanded, access the result data and make them further visible. The foo state should be deallocated from memory when the request’s processing is over and therefore all computed data up to this point would be lost. As was already mentioned, specific naming subsumes not only functions but also the data themselves: - \b foo_state - this is a structure. It contains all the data necessary for the asynchronous task. @subsection cr_req Creating a New Asynchronous Request The first step for working asynchronously is the allocation of memory requirements. As in previous cases, the talloc context is required, upon which the asynchronous request will be tied. The next step is the creation of the request itself. @code struct tevent_req* tevent_req_create (TALLOC_CTX *mem_ctx, void **pstate, #type) @endcode The pstate is the pointer to the private data. The necessary amount of memory (based on data type) is allocated during this call. Within this same memory area all the data from the asynchronous request that need to be preserved for some time should be kept. Dealing with a lack of memory The verification of the returned pointer against NULL is necessary in order to identify a potential lack of memory. There is a special function which helps with this check tevent_req_nomem(). It handles verification both of the talloc memory allocation and of the associated tevent request, and is therefore a very useful function for avoiding unexpected situations. It can easily be used when checking the availability of further memory resources that are required for a tevent request. Imagine an example where additional memory needs arise although no memory resources are currently available. @code bar = talloc(mem_ctx, struct foo); if(tevent_req_nomem (bar, req)) { // handling a problem } @endcode This code ensures that the variable bar, which contains NULL as a result of the unsuccessful satisfaction of its memory requirements, is noticed, and also that the tevent request req declares it exceeds memory capacity, which implies the impossibility of finishing the request as originally programmed. @subsection fini_req Finishing a Request Marking each request as finished is an essential principle of the tevent library. Without marking the request as completed - either successfully or with an error - the tevent loop could not let the appropriate callback be triggered. It is important to understand that this would be a significant threat, because it is not usually a question of one single function which prints some text on a screen, but rather the request is itself probably just a link in a series of other requests. Stopping one request would stop the others, memory resources would not be freed, file descriptors might remain open, communication via socket could be interrupted, and so on. 
Therefore it is important to think about finishing requests, either successfully or not, and to prepare functions for all possible scenarios, so that the callbacks do not process data that are invalid or, even worse, non-existent, which could lead to a segmentation fault.
- \b Manually - This is the most common way of finishing a request. Calling tevent_req_done() sets the request state to TEVENT_REQ_DONE. This is the only purpose of this function and it should be used when everything went well. Typically it is called within the done functions. @code void tevent_req_done (struct tevent_req *req) @endcode Alternatively, the request can end up being unsuccessful. @code bool tevent_req_error (struct tevent_req *req, uint64_t error) @endcode The second argument takes an error number (declared by the programmer, for example in an enumerated type). The function tevent_req_error() sets the status of the request to TEVENT_REQ_USER_ERROR and also stores the error code within the structure so it can be used later, for example for debugging. The function returns true if marking the request as an error succeeded, i.e. if the error value passed to it was not equal to 0. A combined sketch of both variants follows this list.
- Setting up a timeout for a request - A request can also be finished by a timeout if its processing takes too much time. This is considered an error of the request and it leads to the callback being called. In the background, the timeout is implemented through a time event (described in @subpage tevent_events ) which eventually marks the request as TEVENT_REQ_TIMED_OUT (this cannot be considered a successful finish). If a timeout was already set, this call overwrites it with the new time value (so the timeout may be lengthened), and if everything is set up properly it returns true. @code bool tevent_req_set_endtime(struct tevent_req *req, struct tevent_context *ev, struct timeval endtime); @endcode
- Premature Triggering - Imagine a situation in which some part of a nested subrequest ended up with a failure and it is still required to trigger a callback. Such a situation might result from a lack of memory, which makes it impossible to allocate what is needed for the event to start processing another subrequest, or from a clear intention to skip other procedures and trigger the callback regardless of further progress. In these cases, the function tevent_req_post() is very handy and offers this option. @code struct tevent_req* tevent_req_post (struct tevent_req *req, struct tevent_context *ev); @endcode A request finished in this way does not behave as a time event nor as a file descriptor event but as an immediately scheduled event, and therefore it will be treated according to the description laid down in @subpage tevent_events .
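The following minimal sketch (not part of the original text) combines these options: a helper that marks a request exactly once, either as failed or as done, and a matching _recv function that checks the outcome with tevent_req_is_error() before handing out the result. The names foo_finish and foo_recv, and the result member of foo_state, are only illustrative and follow the naming convention above.

@code
/* Called from an event handler once the asynchronous work has finished.
 * A request must be marked exactly once, either as failed or as done. */
static void foo_finish(struct tevent_req *req, int ret)
{
    if (ret != 0) {
        /* the error code is chosen by the programmer; here we reuse ret */
        tevent_req_error(req, ret);
        return;
    }

    tevent_req_done(req);
}

/* The _recv side checks how the request ended before touching the result. */
static int foo_recv(struct tevent_req *req, int *result)
{
    struct foo_state *state = tevent_req_data(req, struct foo_state);
    enum tevent_req_state req_state;
    uint64_t error;

    if (tevent_req_is_error(req, &req_state, &error)) {
        return (int)error;
    }

    *result = state->result;
    return 0;
}
@endcode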
@section nested Subrequests - Nested Requests To create more complex and interconnected asynchronous operations, it is possible to submerge a request into another and thus create a so-called subrequest. Subrequests are not represented by any other special structure but they are created from tevent_req_create(). This diagram shows the nesting and life time of each request. The table below describes the same in words, and shows the triggering of functions during the application run. Wrapper represents the trigger of the whole cascade of (sub)requests. It may be e.g. a time or file descriptor event, or another request that was created at a specific time by the function tevent_wakeup_send() which is a slightly exceptional method of creating @code struct tevent_req *tevent_wakeup_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct timeval wakeup_time); @endcode By calling this function, it is possible to create a tevent request which is actually the return value of this function. In summary, it sets the time value of the tevent request’s creation. While using this function it is necessary to use another function in the subrequest’s callback to check for any problems tevent_wakeup_recv() ) @image html tevent_subrequest.png A comprehensive example of nested subrequests can be found in the file echo_server.c. It implements a complete, self-contained echo server with no dependencies but libevent and libtalloc. */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_thread.dox0000660000000000000000000002336500000000000017161 0ustar00rootroot00000000000000/** @page tevent_thread Chapter 6: Tevent with threads @section threads Tevent with threads In order to use tevent with threads, you must first understand how to use the talloc library in threaded programs. For more information about working with talloc, please visit talloc website where tutorial and documentation are located. If a tevent context structure is talloced from a NULL, thread-safe talloc context, then it can be safe to use in a threaded program. The function talloc_disable_null_tracking() must be called from the initial program thread before any talloc calls are made to ensure talloc is thread-safe. Each thread must create it's own tevent context structure as follows tevent_context_init(NULL) and no talloc memory contexts can be shared between threads. Separate threads using tevent in this way can communicate by writing data into file descriptors that are being monitored by a tevent context on another thread. For example (simplified with no error handling): @code Main thread: main() { talloc_disable_null_tracking(); struct tevent_context *master_ev = tevent_context_init(NULL); void *mem_ctx = talloc_new(master_ev); // Create file descriptor to monitor. int pipefds[2]; pipe(pipefds); struct tevent_fd *fde = tevent_add_fd(master_ev, mem_ctx, pipefds[0], // read side of pipe TEVENT_FD_READ, pipe_read_handler, // callback function private_data_pointer); // Create sub thread, pass pipefds[1] write side of pipe to it. // The above code not shown here.. // Process events. tevent_loop_wait(master_ev); // Cleanup if loop exits. talloc_free(master_ev); } @endcode When the subthread writes to pipefds[1], the function pipe_read_handler() will be called in the main thread. 
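The callback itself is not shown above. A minimal sketch of what pipe_read_handler() might look like is given below; it is not part of the original example, it assumes the read end of the pipe was passed as the private data pointer, and it again omits error handling.

@code
static void pipe_read_handler(struct tevent_context *ev,
                              struct tevent_fd *fde,
                              uint16_t flags,
                              void *private_data)
{
    int fd = *(int *)private_data;   /* assumed: &pipefds[0] was passed */
    char buf[64];
    ssize_t nread;

    if ((flags & TEVENT_FD_READ) == 0) {
        return;
    }

    nread = read(fd, buf, sizeof(buf));
    if (nread <= 0) {
        return;
    }

    /* act on the bytes written by the sub thread */
}
@endcode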
@subsection More sophisticated use A popular way to use an event library within threaded programs is to allow a sub-thread to asynchronously schedule a tevent_immediate function call from the event loop of another thread. This can be built out of the basic functions and isolation mechanisms of tevent, but tevent also comes with some utility functions that make this easier, so long as you understand the limitations that using threads with talloc and tevent impose. To allow a tevent context to receive an asynchronous tevent_immediate function callback from another thread, create a struct tevent_thread_proxy * by calling @code struct tevent_thread_proxy *tevent_thread_proxy_create( struct tevent_context *dest_ev_ctx); @endcode This function allocates the internal data structures to allow asynchronous callbacks as a talloc child of the struct tevent_context *, and returns a struct tevent_thread_proxy * that can be passed to another thread. When you have finished receiving asynchronous callbacks, simply talloc_free the struct tevent_thread_proxy *, or talloc_free the struct tevent_context *, which will deallocate the resources used. To schedule an asynchronous tevent_immediate function call from one thread on the tevent loop of another thread, use @code void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp, struct tevent_immediate **pp_im, tevent_immediate_handler_t handler, void **pp_private_data); @endcode This function causes the function handler() to be invoked as a tevent_immediate callback from the event loop of the thread that created the struct tevent_thread_proxy * (so the owning struct tevent_context * should be long-lived and not in the process of being torn down). The struct tevent_thread_proxy object being used here is a child of the event context of the target thread. So external synchronization mechanisms must be used to ensure that the target object is still in use at the time of the tevent_thread_proxy_schedule() call. In the example below, the request/response nature of the communication ensures this. The struct tevent_immediate **pp_im passed into this function should be a struct tevent_immediate * allocated on a talloc context local to this thread, and will be reparented via talloc_move to be owned by struct tevent_thread_proxy *tp. *pp_im will be set to NULL on successful scheduling of the tevent_immediate call. handler() will be called as a normal tevent_immediate callback from the struct tevent_context * of the destination event loop that created the struct tevent_thread_proxy * Returning from this functions does not mean that the handler has been invoked, merely that it has been scheduled to be called in the destination event loop. Because the calling thread does not wait for the callback to be scheduled and run on the destination thread, this is a fire-and-forget call. If you wish confirmation of the handler() being successfully invoked, you must ensure it replies to the caller in some way. Because of asynchronous nature of this call, the nature of the parameter passed to the destination thread has some restructions. If you don't need parameters, merely pass NULL as the value of void **pp_private_data. If you wish to pass a pointer to data between the threads, it MUST be a pointer to a talloced pointer, which is not part of a talloc-pool, and it must not have a destructor attached. 
The ownership of the memory pointed to will be passed from the calling thread to the tevent library, and if the receiving thread does not talloc-reparent it to its own contexts, it will be freed once the handler is called. On success, *pp_private will be NULL to signify the talloc memory ownership has been moved. In practice for message passing between threads in event loops these restrictions are not very onerous. The easiest way to to a request-reply pair between tevent loops on different threads is to pass the parameter block of memory back and forth using a reply tevent_thread_proxy_schedule() call. Here is an example (without error checking for simplicity): @code ------------------------------------------------ // Master thread. main() { // Make talloc thread-safe. talloc_disable_null_tracking(); // Create the master event context. struct tevent_context *master_ev = tevent_context_init(NULL); // Create the master thread proxy to allow it to receive // async callbacks from other threads. struct tevent_thread_proxy *master_tp = tevent_thread_proxy_create(master_ev); // Create sub-threads, passing master_tp in // some way to them. // This code not shown.. // Process events. // Function master_callback() below // will be invoked on this thread on // master_ev event context. tevent_loop_wait(master_ev); // Cleanup if loop exits. talloc_free(master_ev); } // Data passed between threads. struct reply_state { struct tevent_thread_proxy *reply_tp; pthread_t thread_id; bool *p_finished; }; // Callback Called in child thread context. static void thread_callback(struct tevent_context *ev, struct tevent_immediate *im, void *private_ptr) { // Move the ownership of what private_ptr // points to from the tevent library back to this thread. struct reply_state *rsp = talloc_get_type_abort(private_ptr, struct reply_state); talloc_steal(ev, rsp); *rsp->p_finished = true; // im will be talloc_freed on return from this call. // but rsp will not. } // Callback Called in master thread context. static void master_callback(struct tevent_context *ev, struct tevent_immediate *im, void *private_ptr) { // Move the ownership of what private_ptr // points to from the tevent library to this thread. struct reply_state *rsp = talloc_get_type_abort(private_ptr, struct reply_state); talloc_steal(ev, rsp); printf("Callback from thread %s\n", thread_id_to_string(rsp->thread_id)); /* Now reply to the thread ! */ tevent_thread_proxy_schedule(rsp->reply_tp, &im, thread_callback, &rsp); // Note - rsp and im are now NULL as the tevent library // owns the memory. } // Child thread. static void *thread_fn(void *private_ptr) { struct tevent_thread_proxy *master_tp = talloc_get_type_abort(private_ptr, struct tevent_thread_proxy); bool finished = false; int ret; // Create our own event context. struct tevent_context *ev = tevent_context_init(NULL); // Create the local thread proxy to allow us to receive // async callbacks from other threads. struct tevent_thread_proxy *local_tp = tevent_thread_proxy_create(master_ev); // Setup the data to send. struct reply_state *rsp = talloc(ev, struct reply_state); rsp->reply_tp = local_tp; rsp->thread_id = pthread_self(); rsp->p_finished = &finished; // Create the immediate event to use. struct tevent_immediate *im = tevent_create_immediate(ev); // Call the master thread. tevent_thread_proxy_schedule(master_tp, &im, master_callback, &rsp); // Note - rsp and im are now NULL as the tevent library // owns the memory. // Wait for the reply. while (!finished) { tevent_loop_once(ev); } // Cleanup. 
talloc_free(ev); return NULL; } @endcode Note this doesn't have to be a master-subthread communication. Any thread that has access to the struct tevent_thread_proxy * pointer of another thread that has called tevent_thread_proxy_create() can send an async tevent_immediate request. But remember the caveat that external synchronization must be used to ensure the target struct tevent_thread_proxy * object exists at the time of the tevent_thread_proxy_schedule() call or unreproducible crashes will result. */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tevent_tutorial.dox0000660000000000000000000000052000000000000017541 0ustar00rootroot00000000000000/** @page tevent_tutorial The Tutorial @section tevent_tutorial_introduction Introduction Tutorial describing working with tevent library. @section tevent_tutorial_toc Table of contents @subpage tevent_context @subpage tevent_events @subpage tevent_data @subpage tevent_request @subpage tevent_queue @subpage tevent_thread */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doc/tutorials.dox0000660000000000000000000000400100000000000016335 0ustar00rootroot00000000000000/** * @page tevent_queue_tutorial The tevent_queue tutorial * * @section Introduction * * A tevent_queue is used to queue up async requests that must be * serialized. For example writing buffers into a socket must be * serialized. Writing a large lump of data into a socket can require * multiple write(2) or send(2) system calls. If more than one async * request is outstanding to write large buffers into a socket, every * request must individually be completed before the next one begins, * even if multiple syscalls are required. * * To do this, every socket gets assigned a tevent_queue struct. * * Creating a serialized async request follows the usual convention to * return a tevent_req structure with an embedded state structure. To * serialize the work the requests is about to so, instead of directly * starting or doing that work, tevent_queue_add must be called. When it * is time for the serialized async request to do its work, the trigger * callback function tevent_queue_add was given is called. In the example * of writing to a socket, the trigger is called when the write request * can begin accessing the socket. * * How does this engine work behind the scenes? When the queue is empty, * tevent_queue_add schedules an immediate call to the trigger * callback. The trigger callback starts its work, likely by starting * other async subrequests. While these async subrequests are working, * more requests can accumulate in the queue by tevent_queue_add. While * there is no function to explicitly trigger the next waiter in line, it * still works: When the active request in the queue is done, it will be * destroyed by talloc_free. Talloc_free of an serialized async request * that had been added to a queue will trigger the next request in the * queue via a talloc destructor attached to a child of the serialized * request. This way the queue will be kept busy when an async request * finishes. * * @section Example * * @code * Metze: Please add a code example here. 
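 *
 * // The example below was not part of the original file (which only carried
 * // the placeholder above). It is a hedged sketch of the pattern described
 * // in this tutorial; names such as writer_state, writer_send and
 * // writer_trigger are invented for the illustration.
 *
 * struct writer_state {
 *     struct tevent_context *ev;
 *     int fd;
 *     const char *buf;
 *     size_t len;
 * };
 *
 * static void writer_trigger(struct tevent_req *req, void *private_data);
 *
 * static struct tevent_req *writer_send(TALLOC_CTX *mem_ctx,
 *                                       struct tevent_context *ev,
 *                                       struct tevent_queue *queue,
 *                                       int fd,
 *                                       const char *buf, size_t len)
 * {
 *     struct tevent_req *req;
 *     struct writer_state *state;
 *     bool ok;
 *
 *     req = tevent_req_create(mem_ctx, &state, struct writer_state);
 *     if (req == NULL) {
 *         return NULL;
 *     }
 *     state->ev = ev;
 *     state->fd = fd;
 *     state->buf = buf;
 *     state->len = len;
 *
 *     // Do not start writing yet: queue the request. writer_trigger()
 *     // is called once all previously queued writes have completed.
 *     ok = tevent_queue_add(queue, ev, req, writer_trigger, NULL);
 *     if (!ok) {
 *         tevent_req_error(req, 1);   // 1 is only an example error code
 *         return tevent_req_post(req, ev);
 *     }
 *     return req;
 * }
 *
 * static void writer_trigger(struct tevent_req *req, void *private_data)
 * {
 *     struct writer_state *state =
 *         tevent_req_data(req, struct writer_state);
 *
 *     // It is now this request's turn to own the socket. Start the actual
 *     // (possibly multi-step) write here, for example via a subrequest,
 *     // and call tevent_req_done() or tevent_req_error() once the whole
 *     // buffer in state->buf has been written.
 * }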
* @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/doxy.config0000660000000000000000000023645700000000000015226 0ustar00rootroot00000000000000# Doxyfile 1.8.4 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed # in front of the TAG it is preceding . # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = tevent # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 0.9.8 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = doc # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, # Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, # Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. Note that you specify absolute paths here, but also # relative paths, which will be relative from the directory where doxygen is # started. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. 
STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. 
OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, # and language is one of the parsers supported by doxygen: IDL, Java, # Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, # C++. For instance to make doxygen treat .inc files as Fortran files (default # is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note # that for custom extensions you also need to set FILE_PATTERNS otherwise the # files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by by putting a % sign in front of the word # or globally by setting AUTOLINK_SUPPORT to NO. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES (the # default) will make doxygen replace the get and set methods by a property in # the documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. 
SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields or simple typedef fields will be shown # inline in the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO (the default), structs, classes, and unions are shown on a separate # page (for HTML and Man pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can # be an expensive process and often the same symbol appear multiple times in # the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too # small doxygen will become slower. If the cache is too large, memory is wasted. # The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid # range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 # symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. 
EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = YES # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = YES # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. 
SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if section-label ... \endif # and \cond section-label ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. 
# This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. Do not use # file names with spaces, bibtex cannot handle them. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. 
Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . \ doc # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.cpp \ *.cc \ *.c \ *.h \ *.hh \ *.hpp \ *.dox # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */.git/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. 
If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = doc/img # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be ignored. # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = # If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. 
STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. 
Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If left blank doxygen will # generate a default style sheet. Note that it is recommended to use # HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this # tag will in the future become obsolete. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional # user-defined cascading style sheet that is included after the standard # style sheets created by doxygen. Using this option one can overrule # certain style aspects. This is preferred over using HTML_STYLESHEET # since it does not replace the standard style sheet and is therefor more # robust against future updates. Doxygen will copy the style sheet file to # the output directory. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. 
Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely # identify the documentation publisher. This should be a reverse domain-name # style string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. 
TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. 
GENERATE_TREEVIEW = NONE # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and # SVG. The default value is HTML-CSS, which is slower, but has the best # compatibility. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript # pieces of code that will be used on startup of the MathJax code. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. 
For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. # There are two flavours of web server based search depending on the # EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for # searching and an index file used by the script. When EXTERNAL_SEARCH is # enabled the indexing and searching needs to be provided by external tools. # See the manual for details. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain # the search results. Doxygen ships with an example indexer (doxyindexer) and # search engine (doxysearch.cgi) which are based on the open source search # engine library Xapian. See the manual for configuration details. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will returned the search results when EXTERNAL_SEARCH is enabled. # Doxygen ships with an example search engine (doxysearch) which is based on # the open source search engine library Xapian. See the manual for configuration # details. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project. EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id # of to a relative location where the documentation can be found. # The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. 
LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4 will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images # or other source files which should be copied to the LaTeX output directory. # Note that the files will be copied as-is; there are no commands or markers # available. LATEX_EXTRA_FILES = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. 
XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- # If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files # that can be used to generate PDF. GENERATE_DOCBOOK = NO # The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be put in # front of it. If left blank docbook will be used as the default path. DOCBOOK_OUTPUT = docbook #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. 
SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = DOXYGEN \ PRINTF_ATTRIBUTE(x,y)= # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # If the EXTERNAL_PAGES tag is set to YES all external pages will be listed # in the related pages index. If set to NO, only the current project's # pages will be listed. EXTERNAL_PAGES = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). 
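The PREDEFINED list above defines DOXYGEN (so headers can expose documentation-only sections behind #ifdef DOXYGEN) and expands PRINTF_ATTRIBUTE(x,y) to nothing, so printf-style attribute annotations do not confuse the parser. As a minimal sketch of the kind of declaration this helps with; the macro body and the my_log prototype are illustrative assumptions, not copied from the tevent headers:

/* Roughly how such an attribute macro is commonly defined for GCC builds
 * (illustrative only; the real definition lives in the project headers). */
#ifndef PRINTF_ATTRIBUTE
#define PRINTF_ATTRIBUTE(a1, a2) __attribute__((format(printf, a1, a2)))
#endif

/* Hypothetical prototype: with PRINTF_ATTRIBUTE(x,y)= predefined, doxygen
 * parses this as a plain function declaration instead of tripping over the
 * attribute. */
void my_log(int level, const char *fmt, ...) PRINTF_ATTRIBUTE(2, 3);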
PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. 
UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # manageable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). 
DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/echo_server.c0000660000000000000000000003422000000000000015504 0ustar00rootroot00000000000000/** ** NOTE! The following liberal license applies to this sample file only. ** This does NOT imply that all of Samba is released under this license. ** ** This file is meant as a starting point for libtevent users to be used ** in any program linking against the LGPL licensed libtevent. **/ /* * This file is being made available by the Samba Team under the following * license: * * Permission to use, copy, modify, and distribute this sample file for any * purpose is hereby granted without fee. * * This work is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
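The sample that follows builds every operation (accept, read, write, writeall, echo, echo_server) as a tevent_req "_send/_recv" pair. As a minimal sketch of how a caller drives such a pair to completion from synchronous code, using hypothetical foo_* names (the sample's own main() does the same with echo_server_send/echo_server_recv):

#include <errno.h>
#include <talloc.h>
#include <tevent.h>

/* Hypothetical request pair; accept_send/accept_recv and friends below
 * follow exactly this shape. */
struct tevent_req *foo_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev);
int foo_recv(struct tevent_req *req, int *perr);

static int foo_sync(struct tevent_context *ev)
{
        struct tevent_req *req;
        int ret, err = 0;

        req = foo_send(ev, ev);            /* start the async operation     */
        if (req == NULL) {
                return ENOMEM;
        }
        if (!tevent_req_poll(req, ev)) {   /* run the event loop until done */
                err = errno;
                TALLOC_FREE(req);
                return err;
        }
        ret = foo_recv(req, &err);         /* collect result or unix error  */
        TALLOC_FREE(req);
        return (ret == -1) ? err : 0;
}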
*/ #include #include #include #include #include #include #include #include #include "tevent.h" #include "talloc.h" /** * @brief Helper function to get a useful unix error from tevent_req */ static bool tevent_req_is_unix_error(struct tevent_req *req, int *perrno) { enum tevent_req_state state; uint64_t err; if (!tevent_req_is_error(req, &state, &err)) { return false; } switch (state) { case TEVENT_REQ_TIMED_OUT: *perrno = ETIMEDOUT; break; case TEVENT_REQ_NO_MEMORY: *perrno = ENOMEM; break; case TEVENT_REQ_USER_ERROR: *perrno = err; break; default: *perrno = EINVAL; break; } return true; } /** * @brief Wrapper around accept(2) */ struct accept_state { struct tevent_fd *fde; int listen_sock; socklen_t addrlen; struct sockaddr_storage addr; int sock; }; static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data); static struct tevent_req *accept_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, int listen_sock) { struct tevent_req *req; struct accept_state *state; req = tevent_req_create(mem_ctx, &state, struct accept_state); if (req == NULL) { return NULL; } state->listen_sock = listen_sock; state->fde = tevent_add_fd(ev, state, listen_sock, TEVENT_FD_READ, accept_handler, req); if (tevent_req_nomem(state->fde, req)) { return tevent_req_post(req, ev); } return req; } static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct tevent_req *req = talloc_get_type_abort( private_data, struct tevent_req); struct accept_state *state = tevent_req_data(req, struct accept_state); int ret; TALLOC_FREE(state->fde); if ((flags & TEVENT_FD_READ) == 0) { tevent_req_error(req, EIO); return; } state->addrlen = sizeof(state->addr); ret = accept(state->listen_sock, (struct sockaddr *)&state->addr, &state->addrlen); if (ret == -1) { tevent_req_error(req, errno); return; } smb_set_close_on_exec(ret); state->sock = ret; tevent_req_done(req); } static int accept_recv(struct tevent_req *req, struct sockaddr *paddr, socklen_t *paddrlen, int *perr) { struct accept_state *state = tevent_req_data(req, struct accept_state); int err; if (tevent_req_is_unix_error(req, &err)) { if (perr != NULL) { *perr = err; } return -1; } if (paddr != NULL) { memcpy(paddr, &state->addr, state->addrlen); } if (paddrlen != NULL) { *paddrlen = state->addrlen; } return state->sock; } /** * @brief Wrapper around read(2) */ struct read_state { struct tevent_fd *fde; int fd; void *buf; size_t count; ssize_t nread; }; static void read_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data); static struct tevent_req *read_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, int fd, void *buf, size_t count) { struct tevent_req *req; struct read_state *state; req = tevent_req_create(mem_ctx, &state, struct read_state); if (req == NULL) { return NULL; } state->fd = fd; state->buf = buf; state->count = count; state->fde = tevent_add_fd(ev, state, fd, TEVENT_FD_READ, read_handler, req); if (tevent_req_nomem(state->fde, req)) { return tevent_req_post(req, ev); } return req; } static void read_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct tevent_req *req = talloc_get_type_abort( private_data, struct tevent_req); struct read_state *state = tevent_req_data(req, struct read_state); ssize_t ret; TALLOC_FREE(state->fde); if ((flags & TEVENT_FD_READ) == 0) { tevent_req_error(req, EIO); return; } ret = read(state->fd, state->buf, state->count); if 
(ret == -1) { tevent_req_error(req, errno); return; } state->nread = ret; tevent_req_done(req); } static ssize_t read_recv(struct tevent_req *req, int *perr) { struct read_state *state = tevent_req_data(req, struct read_state); int err; if (tevent_req_is_unix_error(req, &err)) { if (perr != NULL) { *perr = err; } return -1; } return state->nread; } /** * @brief Wrapper around write(2) */ struct write_state { struct tevent_fd *fde; int fd; const void *buf; size_t count; ssize_t nwritten; }; static void write_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data); static struct tevent_req *write_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, int fd, const void *buf, size_t count) { struct tevent_req *req; struct write_state *state; req = tevent_req_create(mem_ctx, &state, struct write_state); if (req == NULL) { return NULL; } state->fd = fd; state->buf = buf; state->count = count; state->fde = tevent_add_fd(ev, state, fd, TEVENT_FD_WRITE, write_handler, req); if (tevent_req_nomem(state->fde, req)) { return tevent_req_post(req, ev); } return req; } static void write_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct tevent_req *req = talloc_get_type_abort( private_data, struct tevent_req); struct write_state *state = tevent_req_data(req, struct write_state); ssize_t ret; TALLOC_FREE(state->fde); if ((flags & TEVENT_FD_WRITE) == 0) { tevent_req_error(req, EIO); return; } ret = write(state->fd, state->buf, state->count); if (ret == -1) { tevent_req_error(req, errno); return; } state->nwritten = ret; tevent_req_done(req); } static ssize_t write_recv(struct tevent_req *req, int *perr) { struct write_state *state = tevent_req_data(req, struct write_state); int err; if (tevent_req_is_unix_error(req, &err)) { if (perr != NULL) { *perr = err; } return -1; } return state->nwritten; } /** * @brief Wrapper function that deals with short writes */ struct writeall_state { struct tevent_context *ev; int fd; const void *buf; size_t count; size_t nwritten; }; static void writeall_done(struct tevent_req *subreq); static struct tevent_req *writeall_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, int fd, const void *buf, size_t count) { struct tevent_req *req, *subreq; struct writeall_state *state; req = tevent_req_create(mem_ctx, &state, struct writeall_state); if (req == NULL) { return NULL; } state->ev = ev; state->fd = fd; state->buf = buf; state->count = count; state->nwritten = 0; subreq = write_send(state, state->ev, state->fd, ((char *)state->buf)+state->nwritten, state->count - state->nwritten); if (tevent_req_nomem(subreq, req)) { return tevent_req_post(req, ev); } tevent_req_set_callback(subreq, writeall_done, req); return req; } static void writeall_done(struct tevent_req *subreq) { struct tevent_req *req = tevent_req_callback_data( subreq, struct tevent_req); struct writeall_state *state = tevent_req_data( req, struct writeall_state); ssize_t nwritten; int err = 0; nwritten = write_recv(subreq, &err); TALLOC_FREE(subreq); if (nwritten == -1) { tevent_req_error(req, err); return; } state->nwritten += nwritten; if (state->nwritten < state->count) { subreq = write_send(state, state->ev, state->fd, ((char *)state->buf)+state->nwritten, state->count - state->nwritten); if (tevent_req_nomem(subreq, req)) { return; } tevent_req_set_callback(subreq, writeall_done, req); return; } tevent_req_done(req); } static ssize_t writeall_recv(struct tevent_req *req, int *perr) { struct writeall_state *state = 
tevent_req_data( req, struct writeall_state); int err; if (tevent_req_is_unix_error(req, &err)) { if (perr != NULL) { *perr = err; } return -1; } return state->nwritten; } /** * @brief Async echo handler code dealing with one client */ struct echo_state { struct tevent_context *ev; int fd; uint8_t *buf; }; static int echo_state_destructor(struct echo_state *s); static void echo_read_done(struct tevent_req *subreq); static void echo_writeall_done(struct tevent_req *subreq); static struct tevent_req *echo_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, int fd, size_t bufsize) { struct tevent_req *req, *subreq; struct echo_state *state; req = tevent_req_create(mem_ctx, &state, struct echo_state); if (req == NULL) { return NULL; } state->ev = ev; state->fd = fd; talloc_set_destructor(state, echo_state_destructor); state->buf = talloc_array(state, uint8_t, bufsize); if (tevent_req_nomem(state->buf, req)) { return tevent_req_post(req, ev); } subreq = read_send(state, state->ev, state->fd, state->buf, talloc_get_size(state->buf)); if (tevent_req_nomem(subreq, req)) { return tevent_req_post(req, ev); } tevent_req_set_callback(subreq, echo_read_done, req); return req; } static int echo_state_destructor(struct echo_state *s) { if (s->fd != -1) { printf("Closing client fd %d\n", s->fd); close(s->fd); s->fd = -1; } return 0; } static void echo_read_done(struct tevent_req *subreq) { struct tevent_req *req = tevent_req_callback_data( subreq, struct tevent_req); struct echo_state *state = tevent_req_data( req, struct echo_state); ssize_t nread; int err; nread = read_recv(subreq, &err); TALLOC_FREE(subreq); if (nread == -1) { tevent_req_error(req, err); return; } if (nread == 0) { tevent_req_done(req); return; } subreq = writeall_send(state, state->ev, state->fd, state->buf, nread); if (tevent_req_nomem(subreq, req)) { return; } tevent_req_set_callback(subreq, echo_writeall_done, req); } static void echo_writeall_done(struct tevent_req *subreq) { struct tevent_req *req = tevent_req_callback_data( subreq, struct tevent_req); struct echo_state *state = tevent_req_data( req, struct echo_state); ssize_t nwritten; int err; nwritten = writeall_recv(subreq, &err); TALLOC_FREE(subreq); if (nwritten == -1) { if (err == EPIPE) { tevent_req_done(req); return; } tevent_req_error(req, err); return; } subreq = read_send(state, state->ev, state->fd, state->buf, talloc_get_size(state->buf)); if (tevent_req_nomem(subreq, req)) { return; } tevent_req_set_callback(subreq, echo_read_done, req); } static bool echo_recv(struct tevent_req *req, int *perr) { int err; if (tevent_req_is_unix_error(req, &err)) { *perr = err; return false; } return true; } /** * @brief Full echo handler code accepting and handling clients */ struct echo_server_state { struct tevent_context *ev; int listen_sock; }; static void echo_server_accepted(struct tevent_req *subreq); static void echo_server_client_done(struct tevent_req *subreq); static struct tevent_req *echo_server_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, int listen_sock) { struct tevent_req *req, *subreq; struct echo_server_state *state; req = tevent_req_create(mem_ctx, &state, struct echo_server_state); if (req == NULL) { return NULL; } state->ev = ev; state->listen_sock = listen_sock; subreq = accept_send(state, state->ev, state->listen_sock); if (tevent_req_nomem(subreq, req)) { return tevent_req_post(req, ev); } tevent_req_set_callback(subreq, echo_server_accepted, req); return req; } static void echo_server_accepted(struct tevent_req *subreq) { struct tevent_req *req 
= tevent_req_callback_data( subreq, struct tevent_req); struct echo_server_state *state = tevent_req_data( req, struct echo_server_state); int sock, err; sock = accept_recv(subreq, NULL, NULL, &err); TALLOC_FREE(subreq); if (sock == -1) { tevent_req_error(req, err); return; } printf("new client fd %d\n", sock); subreq = echo_send(state, state->ev, sock, 100); if (tevent_req_nomem(subreq, req)) { return; } tevent_req_set_callback(subreq, echo_server_client_done, req); subreq = accept_send(state, state->ev, state->listen_sock); if (tevent_req_nomem(subreq, req)) { return; } tevent_req_set_callback(subreq, echo_server_accepted, req); } static void echo_server_client_done(struct tevent_req *subreq) { bool ret; int err; ret = echo_recv(subreq, &err); TALLOC_FREE(subreq); if (ret) { printf("Client done\n"); } else { printf("Client failed: %s\n", strerror(err)); } } static bool echo_server_recv(struct tevent_req *req, int *perr) { int err; if (tevent_req_is_unix_error(req, &err)) { *perr = err; return false; } return true; } int main(int argc, const char **argv) { int ret, port, listen_sock, err; struct tevent_context *ev; struct sockaddr_in addr; struct tevent_req *req; bool result; if (argc != 2) { fprintf(stderr, "Usage: %s \n", argv[0]); exit(1); } port = atoi(argv[1]); printf("listening on port %d\n", port); listen_sock = socket(AF_INET, SOCK_STREAM, 0); if (listen_sock == -1) { perror("socket() failed"); exit(1); } addr = (struct sockaddr_in) { .sin_family = AF_INET, .sin_port = htons(port) }; ret = bind(listen_sock, (struct sockaddr *)&addr, sizeof(addr)); if (ret == -1) { perror("bind() failed"); exit(1); } ret = listen(listen_sock, 5); if (ret == -1) { perror("listen() failed"); exit(1); } ev = tevent_context_init(NULL); if (ev == NULL) { fprintf(stderr, "tevent_context_init failed\n"); exit(1); } req = echo_server_send(ev, ev, listen_sock); if (req == NULL) { fprintf(stderr, "echo_server_send failed\n"); exit(1); } if (!tevent_req_poll(req, ev)) { perror("tevent_req_poll() failed"); exit(1); } result = echo_server_recv(req, &err); TALLOC_FREE(req); if (!result) { fprintf(stderr, "echo_server failed: %s\n", strerror(err)); exit(1); } return 0; } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/pytevent.c0000660000000000000000000005530600000000000015066 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Python bindings for tevent Copyright (C) Jelmer Vernooij 2010 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
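The Python binding that follows wraps tevent contexts, queues, timers, signals and fds. For reference, this is the C-level convention that py_tevent_queue_add() exposes to Python: a request is parked on a queue and a trigger callback fires when it reaches the front. A small sketch with hypothetical names (my_trigger, queue_one); the binding passes a Python callable as private_data instead:

#include <stdbool.h>
#include <talloc.h>
#include <tevent.h>

/* Called by tevent when the queued request reaches the head of the queue. */
static void my_trigger(struct tevent_req *req, void *private_data)
{
        /* start the actual work for req here */
}

static bool queue_one(struct tevent_context *ev,
                      struct tevent_queue *q,
                      struct tevent_req *req)
{
        return tevent_queue_add(q, ev, req, my_trigger, NULL);
}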
*/ #include #include "replace.h" #include #if PY_MAJOR_VERSION >= 3 #define PyLong_FromLong PyLong_FromLong #endif /* discard signature of 'func' in favour of 'target_sig' */ #define PY_DISCARD_FUNC_SIG(target_sig, func) (target_sig)(void(*)(void))func void init_tevent(void); typedef struct { PyObject_HEAD struct tevent_context *ev; } TeventContext_Object; typedef struct { PyObject_HEAD struct tevent_queue *queue; } TeventQueue_Object; typedef struct { PyObject_HEAD struct tevent_req *req; } TeventReq_Object; typedef struct { PyObject_HEAD struct tevent_signal *signal; } TeventSignal_Object; typedef struct { PyObject_HEAD struct tevent_timer *timer; PyObject *callback; } TeventTimer_Object; typedef struct { PyObject_HEAD struct tevent_fd *fd; } TeventFd_Object; static PyTypeObject TeventContext_Type; static PyTypeObject TeventReq_Type; static PyTypeObject TeventQueue_Type; static PyTypeObject TeventSignal_Type; static PyTypeObject TeventTimer_Type; static PyTypeObject TeventFd_Type; static int py_context_init(struct tevent_context *ev) { /* FIXME */ return 0; } static struct tevent_fd *py_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { /* FIXME */ return NULL; } static void py_set_fd_close_fn(struct tevent_fd *fde, tevent_fd_close_fn_t close_fn) { /* FIXME */ } static uint16_t py_get_fd_flags(struct tevent_fd *fde) { /* FIXME */ return 0; } static void py_set_fd_flags(struct tevent_fd *fde, uint16_t flags) { /* FIXME */ } /* timed_event functions */ static struct tevent_timer *py_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location) { /* FIXME */ return NULL; } /* immediate event functions */ static void py_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location) { /* FIXME */ } /* signal functions */ static struct tevent_signal *py_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location) { /* FIXME */ return NULL; } /* loop functions */ static int py_loop_once(struct tevent_context *ev, const char *location) { /* FIXME */ return 0; } static int py_loop_wait(struct tevent_context *ev, const char *location) { /* FIXME */ return 0; } const static struct tevent_ops py_tevent_ops = { .context_init = py_context_init, .add_fd = py_add_fd, .set_fd_close_fn = py_set_fd_close_fn, .get_fd_flags = py_get_fd_flags, .set_fd_flags = py_set_fd_flags, .add_timer = py_add_timer, .schedule_immediate = py_schedule_immediate, .add_signal = py_add_signal, .loop_wait = py_loop_wait, .loop_once = py_loop_once, }; static PyObject *py_register_backend(PyObject *self, PyObject *args) { PyObject *name, *py_backend; if (!PyArg_ParseTuple(args, "O", &py_backend)) return NULL; name = PyObject_GetAttrString(py_backend, "name"); if (name == NULL) { PyErr_SetNone(PyExc_AttributeError); return NULL; } if (!PyUnicode_Check(name)) { PyErr_SetNone(PyExc_TypeError); Py_DECREF(name); return NULL; } if (!tevent_register_backend(PyUnicode_AsUTF8(name), &py_tevent_ops)) { /* FIXME: What to do with backend */ PyErr_SetNone(PyExc_RuntimeError); Py_DECREF(name); return NULL; } Py_DECREF(name); Py_RETURN_NONE; } static PyObject 
*py_tevent_context_reinitialise(TeventContext_Object *self, PyObject *Py_UNUSED(ignored)) { int ret = tevent_re_initialise(self->ev); if (ret != 0) { PyErr_SetNone(PyExc_RuntimeError); return NULL; } Py_RETURN_NONE; } static PyObject *py_tevent_queue_stop(TeventQueue_Object *self, PyObject *Py_UNUSED(ignored)) { tevent_queue_stop(self->queue); Py_RETURN_NONE; } static PyObject *py_tevent_queue_start(TeventQueue_Object *self, PyObject *Py_UNUSED(ignored)) { tevent_queue_start(self->queue); Py_RETURN_NONE; } static void py_queue_trigger(struct tevent_req *req, void *private_data) { PyObject *callback = private_data, *ret; ret = PyObject_CallFunction(callback, discard_const_p(char, "")); Py_XDECREF(ret); } static PyObject *py_tevent_queue_add(TeventQueue_Object *self, PyObject *args) { TeventContext_Object *py_ev; TeventReq_Object *py_req; PyObject *trigger; bool ret; if (!PyArg_ParseTuple(args, "O!O!O", &TeventContext_Type, &py_ev, &TeventReq_Type, &py_req, &trigger)) return NULL; Py_INCREF(trigger); ret = tevent_queue_add(self->queue, py_ev->ev, py_req->req, py_queue_trigger, trigger); if (!ret) { PyErr_SetString(PyExc_RuntimeError, "queue add failed"); Py_DECREF(trigger); return NULL; } Py_RETURN_NONE; } static PyMethodDef py_tevent_queue_methods[] = { { "stop", (PyCFunction)py_tevent_queue_stop, METH_NOARGS, "S.stop()" }, { "start", (PyCFunction)py_tevent_queue_start, METH_NOARGS, "S.start()" }, { "add", (PyCFunction)py_tevent_queue_add, METH_VARARGS, "S.add(ctx, req, trigger, baton)" }, {0}, }; static PyObject *py_tevent_context_wakeup_send(PyObject *self, PyObject *args) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_context_loop_wait(TeventContext_Object *self, PyObject *Py_UNUSED(ignored)) { if (tevent_loop_wait(self->ev) != 0) { PyErr_SetNone(PyExc_RuntimeError); return NULL; } Py_RETURN_NONE; } static PyObject *py_tevent_context_loop_once(TeventContext_Object *self, PyObject *Py_UNUSED(ignored)) { if (tevent_loop_once(self->ev) != 0) { PyErr_SetNone(PyExc_RuntimeError); return NULL; } Py_RETURN_NONE; } static void py_tevent_signal_handler(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { PyObject *callback = (PyObject *)private_data, *ret; ret = PyObject_CallFunction(callback, discard_const_p(char, "ii"), signum, count); Py_XDECREF(ret); } static void py_tevent_signal_dealloc(TeventSignal_Object *self) { talloc_free(self->signal); PyObject_Del(self); } static PyTypeObject TeventSignal_Type = { .tp_name = "tevent.Signal", .tp_basicsize = sizeof(TeventSignal_Object), .tp_dealloc = (destructor)py_tevent_signal_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, }; static PyObject *py_tevent_context_add_signal(TeventContext_Object *self, PyObject *args) { int signum, sa_flags; PyObject *handler; struct tevent_signal *sig; TeventSignal_Object *ret; if (!PyArg_ParseTuple(args, "iiO", &signum, &sa_flags, &handler)) return NULL; Py_INCREF(handler); sig = tevent_add_signal(self->ev, NULL, signum, sa_flags, py_tevent_signal_handler, handler); ret = PyObject_New(TeventSignal_Object, &TeventSignal_Type); if (ret == NULL) { PyErr_NoMemory(); talloc_free(sig); return NULL; } ret->signal = sig; return (PyObject *)ret; } static void py_timer_handler(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { TeventTimer_Object *self = private_data; PyObject *ret; ret = PyObject_CallFunction(self->callback, discard_const_p(char, "l"), te); if (ret == NULL) { /* No Python stack to propagate 
exception to; just print traceback */ PyErr_PrintEx(0); } Py_XDECREF(ret); } static void py_tevent_timer_dealloc(TeventTimer_Object *self) { if (self->timer) { talloc_free(self->timer); } Py_DECREF(self->callback); PyObject_Del(self); } static int py_tevent_timer_traverse(TeventTimer_Object *self, visitproc visit, void *arg) { Py_VISIT(self->callback); return 0; } static PyObject* py_tevent_timer_get_active(TeventTimer_Object *self, PyObject *Py_UNUSED(ignored)) { return PyBool_FromLong(self->timer != NULL); } struct PyGetSetDef py_tevent_timer_getset[] = { { .name = discard_const_p(char, "active"), .get = (getter)py_tevent_timer_get_active, .doc = discard_const_p(char, "true if the timer is scheduled to run"), }, {0}, }; static PyTypeObject TeventTimer_Type = { .tp_name = "tevent.Timer", .tp_basicsize = sizeof(TeventTimer_Object), .tp_dealloc = (destructor)py_tevent_timer_dealloc, .tp_traverse = (traverseproc)py_tevent_timer_traverse, .tp_getset = py_tevent_timer_getset, .tp_flags = Py_TPFLAGS_DEFAULT, }; struct TeventTimer_Object_ref { TeventTimer_Object *obj; }; static int TeventTimer_Object_ref_destructor(struct TeventTimer_Object_ref *ref) { ref->obj->timer = NULL; Py_DECREF(ref->obj); return 0; } static PyObject *py_tevent_context_add_timer_internal(TeventContext_Object *self, struct timeval next_event, PyObject *callback) { /* Ownership notes: * * There are 5 pieces in play; two tevent contexts and 3 Python objects: * - The tevent timer * - The tevent context * - The Python context -- "self" * - The Python timer (TeventTimer_Object) -- "ret" * - The Python callback function -- "callback" * * We only use the Python context for getting the tevent context, * afterwards it can be destroyed. * * The tevent context owns the tevent timer. * * The tevent timer holds a reference to the Python timer, so the Python * timer must always outlive the tevent timer. * The Python timer has a pointer to the tevent timer; a destructor is * used to set this to NULL when the tevent timer is deallocated. * * The tevent timer can be deallocated in these cases: * 1) when the context is destroyed * 2) after the event fires * Posssibly, API might be added to cancel (free the tevent timer). * * The Python timer holds a reference to the callback. 
*/ TeventTimer_Object *ret; struct TeventTimer_Object_ref *ref; ret = PyObject_New(TeventTimer_Object, &TeventTimer_Type); if (ret == NULL) { PyErr_NoMemory(); return NULL; } Py_INCREF(callback); ret->callback = callback; ret->timer = tevent_add_timer(self->ev, NULL, next_event, py_timer_handler, ret); if (ret->timer == NULL) { Py_DECREF(ret); PyErr_SetString(PyExc_RuntimeError, "Could not initialize timer"); return NULL; } ref = talloc(ret->timer, struct TeventTimer_Object_ref); if (ref == NULL) { talloc_free(ret->timer); Py_DECREF(ret); PyErr_SetString(PyExc_RuntimeError, "Could not initialize timer"); return NULL; } Py_INCREF(ret); ref->obj = ret; talloc_set_destructor(ref, TeventTimer_Object_ref_destructor); return (PyObject *)ret; } static PyObject *py_tevent_context_add_timer(TeventContext_Object *self, PyObject *args) { struct timeval next_event; PyObject *callback; double secs, usecs; if (!PyArg_ParseTuple(args, "dO", &secs, &callback)){ return NULL; } next_event.tv_sec = secs; usecs = (secs - next_event.tv_sec) * 1000000.0; next_event.tv_usec = usecs; return py_tevent_context_add_timer_internal(self, next_event, callback); } static PyObject *py_tevent_context_add_timer_offset(TeventContext_Object *self, PyObject *args) { struct timeval next_event; double offset; int seconds; PyObject *callback; if (!PyArg_ParseTuple(args, "dO", &offset, &callback)) return NULL; seconds = offset; offset -= seconds; next_event = tevent_timeval_current_ofs(seconds, (int)(offset*1000000)); return py_tevent_context_add_timer_internal(self, next_event, callback); } static void py_fd_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { PyObject *callback = private_data, *ret; ret = PyObject_CallFunction(callback, discard_const_p(char, "i"), flags); Py_XDECREF(ret); } static void py_tevent_fp_dealloc(TeventFd_Object *self) { talloc_free(self->fd); PyObject_Del(self); } static PyTypeObject TeventFd_Type = { .tp_name = "tevent.Fd", .tp_basicsize = sizeof(TeventFd_Object), .tp_dealloc = (destructor)py_tevent_fp_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, }; static PyObject *py_tevent_context_add_fd(TeventContext_Object *self, PyObject *args) { int fd, flags; PyObject *handler; struct tevent_fd *tfd; TeventFd_Object *ret; if (!PyArg_ParseTuple(args, "iiO", &fd, &flags, &handler)) return NULL; tfd = tevent_add_fd(self->ev, NULL, fd, flags, py_fd_handler, handler); if (tfd == NULL) { PyErr_SetNone(PyExc_RuntimeError); return NULL; } ret = PyObject_New(TeventFd_Object, &TeventFd_Type); if (ret == NULL) { talloc_free(tfd); return NULL; } ret->fd = tfd; return (PyObject *)ret; } static PyMethodDef py_tevent_context_methods[] = { { "reinitialise", (PyCFunction)py_tevent_context_reinitialise, METH_NOARGS, "S.reinitialise()" }, { "wakeup_send", (PyCFunction)py_tevent_context_wakeup_send, METH_VARARGS, "S.wakeup_send(wakeup_time) -> req" }, { "loop_wait", (PyCFunction)py_tevent_context_loop_wait, METH_NOARGS, "S.loop_wait()" }, { "loop_once", (PyCFunction)py_tevent_context_loop_once, METH_NOARGS, "S.loop_once()" }, { "add_signal", (PyCFunction)py_tevent_context_add_signal, METH_VARARGS, "S.add_signal(signum, sa_flags, handler) -> signal" }, { "add_timer", (PyCFunction)py_tevent_context_add_timer, METH_VARARGS, "S.add_timer(next_event, handler) -> timer" }, { "add_timer_offset", (PyCFunction)py_tevent_context_add_timer_offset, METH_VARARGS, "S.add_timer(offset_seconds, handler) -> timer" }, { "add_fd", (PyCFunction)py_tevent_context_add_fd, METH_VARARGS, "S.add_fd(fd, flags, 
handler) -> fd" }, {0}, }; static PyObject *py_tevent_req_wakeup_recv(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_received(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_is_error(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_poll(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_is_in_progress(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyGetSetDef py_tevent_req_getsetters[] = { { .name = discard_const_p(char, "in_progress"), .get = (getter)py_tevent_req_is_in_progress, .doc = discard_const_p(char, "Whether the request is in progress"), }, {0} }; static PyObject *py_tevent_req_post(PyObject *self, PyObject *args) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_set_error(PyObject *self, PyObject *args) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_done(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_notify_callback(PyObject *self, PyObject *Py_UNUSED(ignored)) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_set_endtime(PyObject *self, PyObject *args) { /* FIXME */ Py_RETURN_NONE; } static PyObject *py_tevent_req_cancel(TeventReq_Object *self, PyObject *Py_UNUSED(ignored)) { if (!tevent_req_cancel(self->req)) { PyErr_SetNone(PyExc_RuntimeError); return NULL; } Py_RETURN_NONE; } static PyMethodDef py_tevent_req_methods[] = { { "wakeup_recv", (PyCFunction)py_tevent_req_wakeup_recv, METH_NOARGS, "Wakeup received" }, { "received", (PyCFunction)py_tevent_req_received, METH_NOARGS, "Receive finished" }, { "is_error", (PyCFunction)py_tevent_req_is_error, METH_NOARGS, "is_error() -> (error, state)" }, { "poll", (PyCFunction)py_tevent_req_poll, METH_VARARGS, "poll(ctx)" }, { "post", (PyCFunction)py_tevent_req_post, METH_VARARGS, "post(ctx) -> req" }, { "set_error", (PyCFunction)py_tevent_req_set_error, METH_VARARGS, "set_error(error)" }, { "done", (PyCFunction)py_tevent_req_done, METH_NOARGS, "done()" }, { "notify_callback", (PyCFunction)py_tevent_req_notify_callback, METH_NOARGS, "notify_callback()" }, { "set_endtime", (PyCFunction)py_tevent_req_set_endtime, METH_VARARGS, "set_endtime(ctx, endtime)" }, { "cancel", (PyCFunction)py_tevent_req_cancel, METH_NOARGS, "cancel()" }, {0} }; static void py_tevent_req_dealloc(TeventReq_Object *self) { talloc_free(self->req); PyObject_DEL(self); } static PyTypeObject TeventReq_Type = { .tp_name = "tevent.Request", .tp_basicsize = sizeof(TeventReq_Object), .tp_methods = py_tevent_req_methods, .tp_dealloc = (destructor)py_tevent_req_dealloc, .tp_getset = py_tevent_req_getsetters, /* FIXME: .tp_new = py_tevent_req_new, */ }; static PyObject *py_tevent_queue_get_length(TeventQueue_Object *self, PyObject *Py_UNUSED(ignored)) { return PyLong_FromLong(tevent_queue_length(self->queue)); } static PyGetSetDef py_tevent_queue_getsetters[] = { { .name = discard_const_p(char, "length"), .get = (getter)py_tevent_queue_get_length, .doc = discard_const_p(char, "The number of elements in the queue."), }, {0}, }; static void py_tevent_queue_dealloc(TeventQueue_Object *self) { talloc_free(self->queue); PyObject_Del(self); } static PyTypeObject TeventQueue_Type = { .tp_name = "tevent.Queue", .tp_basicsize = sizeof(TeventQueue_Object), .tp_dealloc = (destructor)py_tevent_queue_dealloc, 
.tp_flags = Py_TPFLAGS_DEFAULT, .tp_getset = py_tevent_queue_getsetters, .tp_methods = py_tevent_queue_methods, }; static PyObject *py_tevent_context_signal_support(PyObject *_self, PyObject *Py_UNUSED(ignored)) { TeventContext_Object *self = (TeventContext_Object *)_self; return PyBool_FromLong(tevent_signal_support(self->ev)); } static PyGetSetDef py_tevent_context_getsetters[] = { { .name = discard_const_p(char, "signal_support"), .get = PY_DISCARD_FUNC_SIG(getter, py_tevent_context_signal_support), .doc = discard_const_p(char, "if this platform and tevent context support signal handling"), }, {0} }; static void py_tevent_context_dealloc(TeventContext_Object *self) { talloc_free(self->ev); PyObject_Del(self); } static PyObject *py_tevent_context_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { const char * const kwnames[] = { "name", NULL }; char *name = NULL; struct tevent_context *ev; TeventContext_Object *ret; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", discard_const_p(char *, kwnames), &name)) return NULL; if (name == NULL) { ev = tevent_context_init(NULL); } else { ev = tevent_context_init_byname(NULL, name); } if (ev == NULL) { PyErr_SetNone(PyExc_RuntimeError); return NULL; } ret = PyObject_New(TeventContext_Object, type); if (ret == NULL) { PyErr_NoMemory(); talloc_free(ev); return NULL; } ret->ev = ev; return (PyObject *)ret; } static PyTypeObject TeventContext_Type = { .tp_name = "tevent.Context", .tp_new = py_tevent_context_new, .tp_basicsize = sizeof(TeventContext_Object), .tp_dealloc = (destructor)py_tevent_context_dealloc, .tp_methods = py_tevent_context_methods, .tp_getset = py_tevent_context_getsetters, .tp_flags = Py_TPFLAGS_DEFAULT, }; static PyObject *py_set_default_backend(PyObject *self, PyObject *args) { char *backend_name; if (!PyArg_ParseTuple(args, "s", &backend_name)) return NULL; tevent_set_default_backend(backend_name); Py_RETURN_NONE; } static PyObject *py_backend_list(PyObject *self, PyObject *Py_UNUSED(ignored)) { PyObject *ret = NULL; PyObject *string = NULL; int i, result; const char **backends = NULL; ret = PyList_New(0); if (ret == NULL) { return NULL; } backends = tevent_backend_list(NULL); if (backends == NULL) { PyErr_SetNone(PyExc_RuntimeError); goto err; } for (i = 0; backends[i]; i++) { string = PyUnicode_FromString(backends[i]); if (!string) { goto err; } result = PyList_Append(ret, string); if (result) { goto err; } Py_DECREF(string); string = NULL; } talloc_free(backends); return ret; err: Py_XDECREF(ret); Py_XDECREF(string); talloc_free(backends); return NULL; } static PyMethodDef tevent_methods[] = { { "register_backend", (PyCFunction)py_register_backend, METH_VARARGS, "register_backend(backend)" }, { "set_default_backend", (PyCFunction)py_set_default_backend, METH_VARARGS, "set_default_backend(backend)" }, { "backend_list", (PyCFunction)py_backend_list, METH_NOARGS, "backend_list() -> list" }, {0}, }; #define MODULE_DOC PyDoc_STR("Python wrapping of talloc-maintained objects.") #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, .m_name = "_tevent", .m_doc = MODULE_DOC, .m_size = -1, .m_methods = tevent_methods, }; #endif PyObject * module_init(void); PyObject * module_init(void) { PyObject *m; if (PyType_Ready(&TeventContext_Type) < 0) return NULL; if (PyType_Ready(&TeventQueue_Type) < 0) return NULL; if (PyType_Ready(&TeventReq_Type) < 0) return NULL; if (PyType_Ready(&TeventSignal_Type) < 0) return NULL; if (PyType_Ready(&TeventTimer_Type) < 0) return NULL; if 
(PyType_Ready(&TeventFd_Type) < 0) return NULL; #if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&moduledef); #else m = Py_InitModule3("_tevent", tevent_methods, MODULE_DOC); #endif if (m == NULL) return NULL; Py_INCREF(&TeventContext_Type); PyModule_AddObject(m, "Context", (PyObject *)&TeventContext_Type); Py_INCREF(&TeventQueue_Type); PyModule_AddObject(m, "Queue", (PyObject *)&TeventQueue_Type); Py_INCREF(&TeventReq_Type); PyModule_AddObject(m, "Request", (PyObject *)&TeventReq_Type); Py_INCREF(&TeventSignal_Type); PyModule_AddObject(m, "Signal", (PyObject *)&TeventSignal_Type); Py_INCREF(&TeventTimer_Type); PyModule_AddObject(m, "Timer", (PyObject *)&TeventTimer_Type); Py_INCREF(&TeventFd_Type); PyModule_AddObject(m, "Fd", (PyObject *)&TeventFd_Type); PyModule_AddStringConstant(m, "__version__", PACKAGE_VERSION); return m; } #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit__tevent(void); PyMODINIT_FUNC PyInit__tevent(void) { return module_init(); } #else void init_tevent(void); void init_tevent(void) { module_init(); } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/test_req.c0000660000000000000000000001641700000000000015036 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * testing of some tevent_req aspects * * Copyright (C) Volker Lendecke 2018 * * ** NOTE! The following LGPL license applies to the tevent * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . 
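The tests that follow exercise tevent_req request profiling. Condensed to its core, the workflow they verify looks like this (sketch only; req stands for any request created with tevent_req_create and driven to completion by the event loop):

#include <talloc.h>
#include <tevent.h>

/* Enable profiling on a request and hand the collected profile to the
 * caller once the request has finished. */
static struct tevent_req_profile *grab_profile(TALLOC_CTX *mem_ctx,
                                               struct tevent_req *req)
{
        if (!tevent_req_set_profile(req)) {  /* start collecting timings */
                return NULL;
        }
        /* ... run the request to completion via the event loop ... */
        return tevent_req_move_profile(req, mem_ctx);
}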
*/ #include "includes.h" #include "tevent.h" #include "torture/torture.h" #include "torture/local/proto.h" #include "lib/util/tevent_unix.h" #include "lib/util/tevent_req_profile.h" #include "lib/util/time_basic.h" struct tevent_req_create_state { uint8_t val; }; static bool test_tevent_req_create(struct torture_context *tctx, const void *test_data) { struct tevent_req *req; struct tevent_req_create_state *state; req = tevent_req_create(tctx, &state, struct tevent_req_create_state); torture_assert_not_null(tctx, req, "tevent_req_create failed\n"); torture_assert_int_equal(tctx, state->val, 0, "state not initialized\n"); TALLOC_FREE(req); return true; } struct profile1_state { uint8_t dummy; }; static bool test_tevent_req_profile1(struct torture_context *tctx, const void *test_data) { struct tevent_req *req; struct profile1_state *state; const struct tevent_req_profile *p1; struct tevent_req_profile *p2; struct timeval start, stop; bool ok; int cmp; req = tevent_req_create(tctx, &state, struct profile1_state); torture_assert_not_null(tctx, req, "tevent_req_create failed\n"); p1 = tevent_req_get_profile(req); torture_assert(tctx, p1 == NULL, "profile not initialized to NULL\n"); ok = tevent_req_set_profile(req); torture_assert(tctx, ok, "set_profile failed\n"); tevent_req_done(req); p2 = tevent_req_move_profile(req, tctx); torture_assert_not_null(tctx, p2, "get_profile failed\n"); /* Demonstrate sure "p2" outlives req */ TALLOC_FREE(req); tevent_req_profile_get_start(p2, NULL, &start); tevent_req_profile_get_stop(p2, NULL, &stop); cmp = tevent_timeval_compare(&start, &stop); torture_assert(tctx, cmp <= 0, "stop before start\n"); TALLOC_FREE(p2); return true; } struct profile2_state { uint8_t dummy; }; static void profile2_done(struct tevent_req *subreq); static struct tevent_req *profile2_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev) { struct tevent_req *req, *subreq; struct profile2_state *state; bool ok; req = tevent_req_create(mem_ctx, &state, struct profile2_state); if (req == NULL) { return NULL; } ok = tevent_req_set_profile(req); if (!ok) { return tevent_req_post(req, ev); } subreq = tevent_wakeup_send( state, ev, tevent_timeval_current_ofs(0, 1)); if (tevent_req_nomem(subreq, req)) { return tevent_req_post(req, ev); } tevent_req_set_callback(subreq, profile2_done, req); return req; } static void profile2_done(struct tevent_req *subreq) { struct tevent_req *req = tevent_req_callback_data( subreq, struct tevent_req); bool ok; ok = tevent_wakeup_recv(subreq); if (!ok) { tevent_req_oom(req); return; } tevent_req_done(req); } static int profile2_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx, struct tevent_req_profile **profile) { int err; if (tevent_req_is_unix_error(req, &err)) { return err; } *profile = tevent_req_move_profile(req, mem_ctx); return 0; } static bool test_tevent_req_profile2(struct torture_context *tctx, const void *test_data) { struct tevent_context *ev; struct tevent_req *req; struct tevent_req_profile *p1 = NULL; struct tevent_req_profile *p2 = NULL; const char *str1, *str2; struct timeval tv1, tv2; pid_t pid1, pid2; enum tevent_req_state state1, state2; uint64_t err1, err2; char *printstring; ssize_t pack_len; int err; bool ok; ev = samba_tevent_context_init(tctx); torture_assert_not_null(tctx, ev, "samba_tevent_context_init failed\n"); req = profile2_send(tctx, ev); torture_assert_not_null(tctx, req, "profile2_send failed\n"); ok = tevent_req_poll_unix(req, ev, &err); torture_assert(tctx, ok, "tevent_req_poll_unix failed\n"); err = profile2_recv(req, tctx, 
&p1); torture_assert_int_equal(tctx, err, 0, "profile2_recv failed\n"); TALLOC_FREE(req); TALLOC_FREE(ev); printstring = tevent_req_profile_string(tctx, p1, 0, UINT_MAX); torture_assert_not_null( tctx, printstring, "tevent_req_profile_string failed\n"); printf("%s\n", printstring); pack_len = tevent_req_profile_pack(p1, NULL, 0); torture_assert(tctx, pack_len>0, "profile_pack failed\n"); { uint8_t buf[pack_len]; ssize_t unpack_len; tevent_req_profile_pack(p1, buf, sizeof(buf)); dump_data(10, buf, sizeof(buf)); unpack_len = tevent_req_profile_unpack( buf, pack_len, tctx, &p2); torture_assert_int_equal(tctx, pack_len, unpack_len, "profile_unpack failed\n"); } printstring = tevent_req_profile_string(tctx, p2, 0, UINT_MAX); torture_assert_not_null( tctx, printstring, "tevent_req_profile_string failed\n"); printf("%s\n", printstring); tevent_req_profile_get_name(p1, &str1); tevent_req_profile_get_name(p2, &str2); torture_assert_str_equal(tctx, str1, str2, "names differ\n"); tevent_req_profile_get_start(p1, &str1, &tv1); tevent_req_profile_get_start(p2, &str2, &tv2); torture_assert_str_equal(tctx, str1, str2, "start strings differ\n"); torture_assert(tctx, tevent_timeval_compare(&tv1, &tv2) == 0, "start times differ\n"); tevent_req_profile_get_stop(p1, &str1, &tv1); tevent_req_profile_get_stop(p2, &str2, &tv2); torture_assert_str_equal(tctx, str1, str2, "stop strings differ\n"); torture_assert(tctx, tevent_timeval_compare(&tv1, &tv2) == 0, "stop times differ\n"); tevent_req_profile_get_status(p1, &pid1, &state1, &err1); tevent_req_profile_get_status(p2, &pid2, &state2, &err2); torture_assert_int_equal(tctx, pid1, pid2, "pids differ\n"); torture_assert_int_equal(tctx, state1, state2, "states differ\n"); torture_assert_int_equal(tctx, err1, err2, "user errors differ\n"); str1 = tevent_req_profile_string(p1, p1, 0, UINT_MAX); torture_assert_not_null(tctx, str1, "profile_string failed\n"); str2 = tevent_req_profile_string(p2, p2, 0, UINT_MAX); torture_assert_not_null(tctx, str2, "profile_string failed\n"); torture_assert_str_equal(tctx, str1, str2, "result strings differ\n"); TALLOC_FREE(p1); TALLOC_FREE(p2); return true; } struct torture_suite *torture_local_tevent_req(TALLOC_CTX *mem_ctx) { struct torture_suite *suite; suite = torture_suite_create(mem_ctx, "tevent_req"); torture_suite_add_simple_tcase_const( suite, "create", test_tevent_req_create, NULL); torture_suite_add_simple_tcase_const( suite, "profile1", test_tevent_req_profile1, NULL); torture_suite_add_simple_tcase_const( suite, "profile2", test_tevent_req_profile2, NULL); return suite; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tests/test_tevent_tag.c0000660000000000000000000001155200000000000017544 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * testing of some tevent_req aspects * * Copyright (C) Pavel BÅ™ezina 2021 * * ** NOTE! The following LGPL license applies to the tevent * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include #include #include #include #include #include #include #include static void fd_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { /* Dummy handler. Just return. */ return; } static void timer_handler(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { /* Dummy handler. Just return. */ return; } static void signal_handler(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { /* Dummy handler. Just return. */ return; } static void immediate_handler(struct tevent_context *ctx, struct tevent_immediate *im, void *private_data) { /* Dummy handler. Just return. */ return; } static int test_setup(void **state) { struct tevent_context *ev; ev = tevent_context_init(NULL); assert_non_null(ev); *state = ev; return 0; } static int test_teardown(void **state) { struct tevent_context *ev = (struct tevent_context *)(*state); talloc_free(ev); return 0; } static void test_fd_tag(void **state) { struct tevent_context *ev = (struct tevent_context *)(*state); struct tevent_fd *fde; uint64_t tag; fde = tevent_add_fd(ev, ev, 0, TEVENT_FD_READ, fd_handler, NULL); assert_non_null(fde); tag = tevent_fd_get_tag(fde); assert_int_equal(0, tag); tevent_fd_set_tag(fde, 1); tag = tevent_fd_get_tag(fde); assert_int_equal(1, tag); tevent_re_initialise(ev); tag = tevent_fd_get_tag(fde); assert_int_equal(1, tag); TALLOC_FREE(fde); } static void test_timer_tag(void **state) { struct tevent_context *ev = (struct tevent_context *)(*state); struct tevent_timer *te; struct timeval next; uint64_t tag; next = tevent_timeval_current(); te = tevent_add_timer(ev, ev, next, timer_handler, NULL); assert_non_null(te); tag = tevent_timer_get_tag(te); assert_int_equal(0, tag); tevent_timer_set_tag(te, 1); tag = tevent_timer_get_tag(te); assert_int_equal(1, tag); next = tevent_timeval_current(); tevent_update_timer(te, next); tag = tevent_timer_get_tag(te); assert_int_equal(1, tag); tevent_re_initialise(ev); tag = tevent_timer_get_tag(te); assert_int_equal(1, tag); TALLOC_FREE(te); } static void test_signal_tag(void **state) { struct tevent_context *ev = (struct tevent_context *)(*state); struct tevent_signal *se; uint64_t tag; se = tevent_add_signal(ev, ev, SIGUSR1, 0, signal_handler, NULL); assert_non_null(se); tag = tevent_signal_get_tag(se); assert_int_equal(0, tag); tevent_signal_set_tag(se, 1); tag = tevent_signal_get_tag(se); assert_int_equal(1, tag); tevent_re_initialise(ev); tag = tevent_signal_get_tag(se); assert_int_equal(1, tag); TALLOC_FREE(se); } static void test_immediate_tag(void **state) { struct tevent_context *ev = (struct tevent_context *)(*state); struct tevent_immediate *im; uint64_t tag; im = tevent_create_immediate(ev); assert_non_null(im); tag = tevent_immediate_get_tag(im); assert_int_equal(0, tag); tevent_immediate_set_tag(im, 1); tag = tevent_immediate_get_tag(im); assert_int_equal(1, tag); tevent_schedule_immediate(im, ev, immediate_handler, NULL); tag = tevent_immediate_get_tag(im); assert_int_equal(1, tag); tevent_re_initialise(ev); tag = tevent_immediate_get_tag(im); assert_int_equal(1, tag); TALLOC_FREE(im); } int main(int argc, char **argv) { const struct CMUnitTest tests[] = { cmocka_unit_test_setup_teardown(test_fd_tag, test_setup, test_teardown), 
cmocka_unit_test_setup_teardown(test_timer_tag, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_signal_tag, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_immediate_tag, test_setup, test_teardown), }; cmocka_set_message_output(CM_OUTPUT_SUBUNIT); return cmocka_run_group_tests(tests, NULL, NULL); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tests/test_tevent_trace.c0000660000000000000000000007576600000000000020110 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * testing of some tevent_req aspects * * Copyright (C) Pavel BÅ™ezina 2021 * * ** NOTE! The following LGPL license applies to the tevent * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include #include #include #include #include #include #include #include #include struct test_ctx { struct tevent_context *ev; bool handler_skipped; bool reattach_reset; uint64_t (*get_tag)(const void *event); void (*set_tag)(void *event, uint64_t tag); uint64_t current_tag; bool attach; bool before_handler; bool handler_called; bool detach; }; static void fd_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_fd_get_tag(fde); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); return; } static void timer_handler(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_timer_get_tag(te); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); return; } static void signal_handler(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_signal_get_tag(se); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); return; } static void immediate_handler(struct tevent_context *ctx, struct tevent_immediate *im, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_immediate_get_tag(im); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); return; } static void 
immediate_handler_reschedule(struct tevent_context *ctx, struct tevent_immediate *im, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_immediate_get_tag(im); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); assert_false(tctx->reattach_reset); tctx->reattach_reset = true; tevent_schedule_immediate(im, tctx->ev, immediate_handler, tctx); assert_false(tctx->reattach_reset); assert_false(tctx->handler_skipped); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); assert_int_not_equal(tag, tctx->current_tag); tag = tevent_immediate_get_tag(im); assert_int_equal(tag, tctx->current_tag); tctx->handler_skipped = true; tctx->reattach_reset = true; tevent_schedule_immediate(im, tctx->ev, immediate_handler, tctx); assert_false(tctx->reattach_reset); assert_false(tctx->handler_skipped); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); assert_int_not_equal(tag, tctx->current_tag); tag = tevent_immediate_get_tag(im); assert_int_equal(tag, tctx->current_tag); } static void fd_handler_free(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_fd_get_tag(fde); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); TALLOC_FREE(fde); assert_true(tctx->detach); return; } static void timer_handler_free(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_timer_get_tag(te); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); TALLOC_FREE(te); assert_true(tctx->detach); return; } static void signal_handler_free(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_signal_get_tag(se); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); TALLOC_FREE(se); assert_true(tctx->detach); return; } static void immediate_handler_free(struct tevent_context *ctx, struct tevent_immediate *im, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tevent_immediate_get_tag(im); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_called = true; assert_int_equal(tag, tctx->current_tag); TALLOC_FREE(im); assert_true(tctx->detach); return; } static void trace_event_cb(void *event, enum tevent_event_trace_point point, void *private_data) { struct test_ctx *tctx = (struct test_ctx *)private_data; uint64_t tag = tctx->get_tag(event); switch (point) { case TEVENT_EVENT_TRACE_ATTACH: if (tctx->reattach_reset) { assert_true(tctx->attach); assert_true(tctx->detach); 
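/*
 * Illustrative sketch (not part of this test): the trace_event_cb callback
 * here validates the whole attach/before-handler/detach lifecycle for the
 * tests in this file. A standalone consumer of the per-event trace API can
 * be much simpler, e.g. stamping every fd event with a fresh tag at attach
 * time. The names trace_tag_fds and next_fd_tag are invented for this
 * sketch and are not part of tevent.
 */
#include <stdio.h>
#include <stdint.h>
#include <tevent.h>

static uint64_t next_fd_tag;

static void trace_tag_fds(struct tevent_fd *fde,
                          enum tevent_event_trace_point point,
                          void *private_data)
{
        switch (point) {
        case TEVENT_EVENT_TRACE_ATTACH:
                /* Hand out a unique tag as soon as the event is attached. */
                tevent_fd_set_tag(fde, ++next_fd_tag);
                break;
        case TEVENT_EVENT_TRACE_BEFORE_HANDLER:
        case TEVENT_EVENT_TRACE_DETACH:
                fprintf(stderr, "fd event tag=%llu point=%d\n",
                        (unsigned long long)tevent_fd_get_tag(fde),
                        (int)point);
                break;
        }
}
/* Installed with: tevent_set_trace_fd_callback(ev, trace_tag_fds, NULL); */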
tctx->attach = false; tctx->before_handler = false; tctx->handler_called = false; tctx->detach = false; tctx->handler_skipped = false; tctx->reattach_reset = false; } assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->attach = true; assert_int_equal(tag, tctx->current_tag); tag = ++tctx->current_tag; tctx->set_tag(event, tag); break; case TEVENT_EVENT_TRACE_BEFORE_HANDLER: assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->before_handler = true; assert_int_equal(tag, tctx->current_tag); break; case TEVENT_EVENT_TRACE_DETACH: assert_true(tctx->attach); if (tctx->handler_skipped) { assert_false(tctx->before_handler); assert_false(tctx->handler_called); } else { assert_true(tctx->before_handler); assert_true(tctx->handler_called); } assert_false(tctx->detach); tctx->detach = true; assert_int_equal(tag, tctx->current_tag); break; } } static int test_setup(void **state) { struct test_ctx *tctx; tctx = talloc_zero(NULL, struct test_ctx); assert_non_null(tctx); tctx->ev = tevent_context_init(tctx); assert_non_null(tctx->ev); *state = tctx; return 0; } static int test_teardown(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); tctx->get_tag = NULL; tctx->set_tag = NULL; tevent_set_trace_fd_callback(tctx->ev, NULL, NULL); tevent_set_trace_timer_callback(tctx->ev, NULL, NULL); tevent_set_trace_signal_callback(tctx->ev, NULL, NULL); tevent_set_trace_immediate_callback(tctx->ev, NULL, NULL); TALLOC_FREE(tctx); return 0; } static uint64_t fd_get_tag(const void *_event) { const struct tevent_fd *event = (const struct tevent_fd *)_event; return tevent_fd_get_tag(event); } static void fd_set_tag(void *_event, uint64_t tag) { struct tevent_fd *event = (struct tevent_fd *)_event; tevent_fd_set_tag(event, tag); } static uint64_t timer_get_tag(const void *_event) { const struct tevent_timer *event = (const struct tevent_timer *)_event; return tevent_timer_get_tag(event); } static void timer_set_tag(void *_event, uint64_t tag) { struct tevent_timer *event = (struct tevent_timer *)_event; tevent_timer_set_tag(event, tag); } static uint64_t signal_get_tag(const void *_event) { const struct tevent_signal *event = (const struct tevent_signal *)_event; return tevent_signal_get_tag(event); } static void signal_set_tag(void *_event, uint64_t tag) { struct tevent_signal *event = (struct tevent_signal *)_event; tevent_signal_set_tag(event, tag); } static uint64_t immediate_get_tag(const void *_event) { const struct tevent_immediate *event = (const struct tevent_immediate *)_event; return tevent_immediate_get_tag(event); } static void immediate_set_tag(void *_event, uint64_t tag) { struct tevent_immediate *event = (struct tevent_immediate *)_event; tevent_immediate_set_tag(event, tag); } static void test_trace_event_fd__loop(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_fd *fde; tctx->get_tag = fd_get_tag; tctx->set_tag = fd_set_tag; tevent_set_trace_fd_callback(tctx->ev, (tevent_trace_fd_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); fde = tevent_add_fd(tctx->ev, tctx, 0, TEVENT_FD_WRITE, fd_handler, tctx); assert_non_null(fde); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); TEVENT_FD_WRITEABLE(fde); 
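/*
 * Illustrative sketch (not part of this test): leaving the tracing machinery
 * aside, the fd flow this test drives is simply "ask for TEVENT_FD_WRITE,
 * run the loop, clear the interest bit in the handler". The helper
 * on_writable and the pipe wiring below are invented for this sketch.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

static void on_writable(struct tevent_context *ev, struct tevent_fd *fde,
                        uint16_t flags, void *private_data)
{
        int *done = (int *)private_data;

        /* Drop write interest again so the loop does not spin. */
        TEVENT_FD_NOT_WRITEABLE(fde);
        *done = 1;
}

int main(void)
{
        struct tevent_context *ev = tevent_context_init(NULL);
        struct tevent_fd *fde = NULL;
        int fds[2];
        int done = 0;

        if (ev == NULL || pipe(fds) != 0) {
                return 1;
        }
        /* The write end of a fresh pipe reports writable immediately. */
        fde = tevent_add_fd(ev, ev, fds[1], TEVENT_FD_WRITE,
                            on_writable, &done);
        if (fde == NULL) {
                return 1;
        }
        while (!done) {
                tevent_loop_once(ev);
        }
        printf("pipe write end is writable\n");
        talloc_free(ev);
        return 0;
}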
tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_false(tctx->detach); TALLOC_FREE(fde); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_fd__reset(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_fd *fde; tctx->get_tag = fd_get_tag; tctx->set_tag = fd_set_tag; tevent_set_trace_fd_callback(tctx->ev, (tevent_trace_fd_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); fde = tevent_add_fd(tctx->ev, tctx, 0, TEVENT_FD_WRITE, fd_handler, tctx); assert_non_null(fde); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; tevent_re_initialise(tctx->ev); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_fd__free(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_fd *fde; tctx->get_tag = fd_get_tag; tctx->set_tag = fd_set_tag; tevent_set_trace_fd_callback(tctx->ev, (tevent_trace_fd_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); fde = tevent_add_fd(tctx->ev, tctx, 0, TEVENT_FD_WRITE, fd_handler, tctx); assert_non_null(fde); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; TALLOC_FREE(fde); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_fd__free_in_handler(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_fd *fde; tctx->get_tag = fd_get_tag; tctx->set_tag = fd_set_tag; tevent_set_trace_fd_callback(tctx->ev, (tevent_trace_fd_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); fde = tevent_add_fd(tctx->ev, tctx, 0, TEVENT_FD_WRITE, fd_handler_free, tctx); assert_non_null(fde); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); TEVENT_FD_WRITEABLE(fde); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_timer__loop(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_timer *te; struct timeval next; tctx->get_tag = timer_get_tag; tctx->set_tag = timer_set_tag; tevent_set_trace_timer_callback(tctx->ev, (tevent_trace_timer_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); next = tevent_timeval_current(); te = tevent_add_timer(tctx->ev, tctx, next, timer_handler, tctx); assert_non_null(te); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); 
assert_true(tctx->handler_called); /* timer events are self destructing after calling the handler */ assert_true(tctx->detach); } static void test_trace_event_timer__reset(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_timer *te; struct timeval next; tctx->get_tag = timer_get_tag; tctx->set_tag = timer_set_tag; tevent_set_trace_timer_callback(tctx->ev, (tevent_trace_timer_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); next = tevent_timeval_current(); te = tevent_add_timer(tctx->ev, tctx, next, timer_handler, tctx); assert_non_null(te); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); assert_true(tctx->attach); assert_false(tctx->reattach_reset); tctx->handler_skipped = true; tctx->reattach_reset = true; next = tevent_timeval_current(); tevent_update_timer(te, next); assert_false(tctx->reattach_reset); assert_false(tctx->handler_skipped); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; tevent_re_initialise(tctx->ev); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_timer__free(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_timer *te; struct timeval next; tctx->get_tag = timer_get_tag; tctx->set_tag = timer_set_tag; tevent_set_trace_timer_callback(tctx->ev, (tevent_trace_timer_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); next = tevent_timeval_current(); te = tevent_add_timer(tctx->ev, tctx, next, timer_handler, tctx); assert_non_null(te); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); assert_true(tctx->attach); tctx->handler_skipped = true; TALLOC_FREE(te); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_timer__free_in_handler(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_timer *te; struct timeval next; tctx->get_tag = timer_get_tag; tctx->set_tag = timer_set_tag; tevent_set_trace_timer_callback(tctx->ev, (tevent_trace_timer_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); next = tevent_timeval_current(); te = tevent_add_timer(tctx->ev, tctx, next, timer_handler_free, tctx); assert_non_null(te); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_signal__loop(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_signal *se; tctx->get_tag = signal_get_tag; tctx->set_tag = signal_set_tag; tevent_set_trace_signal_callback(tctx->ev, (tevent_trace_signal_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); 
assert_false(tctx->detach); se = tevent_add_signal(tctx->ev, tctx, SIGUSR1, 0, signal_handler, tctx); assert_non_null(se); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); kill(getpid(), SIGUSR1); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_false(tctx->detach); TALLOC_FREE(se); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_signal__reset(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_signal *se; tctx->get_tag = signal_get_tag; tctx->set_tag = signal_set_tag; tevent_set_trace_signal_callback(tctx->ev, (tevent_trace_signal_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); se = tevent_add_signal(tctx->ev, tctx, SIGUSR1, 0, signal_handler, tctx); assert_non_null(se); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; TALLOC_FREE(se); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_signal__free(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_signal *se; tctx->get_tag = signal_get_tag; tctx->set_tag = signal_set_tag; tevent_set_trace_signal_callback(tctx->ev, (tevent_trace_signal_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); se = tevent_add_signal(tctx->ev, tctx, SIGUSR1, 0, signal_handler, tctx); assert_non_null(se); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; TALLOC_FREE(se); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_signal__free_in_handler(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_signal *se; tctx->get_tag = signal_get_tag; tctx->set_tag = signal_set_tag; tevent_set_trace_signal_callback(tctx->ev, (tevent_trace_signal_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); se = tevent_add_signal(tctx->ev, tctx, SIGUSR1, 0, signal_handler_free, tctx); assert_non_null(se); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); kill(getpid(), SIGUSR1); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_immediate__loop(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_immediate *im; tctx->get_tag = immediate_get_tag; tctx->set_tag = immediate_set_tag; tevent_set_trace_immediate_callback(tctx->ev, (tevent_trace_immediate_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); im = tevent_create_immediate(tctx); 
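/*
 * Illustrative sketch (not part of this test): an immediate is created once
 * and then scheduled; its handler runs on the next pass through the event
 * loop, after which the immediate detaches itself and can be scheduled
 * again. The names run_soon and "fired" are invented for this sketch.
 */
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

static void run_soon(struct tevent_context *ev, struct tevent_immediate *im,
                     void *private_data)
{
        int *fired = (int *)private_data;

        (*fired)++;
}

int main(void)
{
        struct tevent_context *ev = tevent_context_init(NULL);
        struct tevent_immediate *im = NULL;
        int fired = 0;

        if (ev == NULL) {
                return 1;
        }
        im = tevent_create_immediate(ev);
        if (im == NULL) {
                return 1;
        }
        tevent_schedule_immediate(im, ev, run_soon, &fired);
        tevent_loop_once(ev);
        /* "im" has detached again and may be re-used. */
        tevent_schedule_immediate(im, ev, run_soon, &fired);
        tevent_loop_once(ev);
        printf("immediate handler ran %d times\n", fired);
        talloc_free(ev);
        return 0;
}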
assert_non_null(im); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_schedule_immediate(im, tctx->ev, immediate_handler, tctx); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); /* immediate events are self detaching */ assert_true(tctx->detach); } static void test_trace_event_immediate__reset(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_immediate *im; tctx->get_tag = immediate_get_tag; tctx->set_tag = immediate_set_tag; tevent_set_trace_immediate_callback(tctx->ev, (tevent_trace_immediate_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); im = tevent_create_immediate(tctx); assert_non_null(im); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_schedule_immediate(im, tctx->ev, immediate_handler, tctx); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; tevent_re_initialise(tctx->ev); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_immediate__free(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_immediate *im; tctx->get_tag = immediate_get_tag; tctx->set_tag = immediate_set_tag; tevent_set_trace_immediate_callback(tctx->ev, (tevent_trace_immediate_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); im = tevent_create_immediate(tctx); assert_non_null(im); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_schedule_immediate(im, tctx->ev, immediate_handler, tctx); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tctx->handler_skipped = true; TALLOC_FREE(im); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_true(tctx->detach); } static void test_trace_event_immediate__free_in_handler(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_immediate *im; tctx->get_tag = immediate_get_tag; tctx->set_tag = immediate_set_tag; tevent_set_trace_immediate_callback(tctx->ev, (tevent_trace_immediate_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); im = tevent_create_immediate(tctx); assert_non_null(im); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_schedule_immediate(im, tctx->ev, immediate_handler_free, tctx); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); assert_true(tctx->detach); } static void 
test_trace_event_immediate__reschedule(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); struct tevent_immediate *im; tctx->get_tag = immediate_get_tag; tctx->set_tag = immediate_set_tag; tevent_set_trace_immediate_callback(tctx->ev, (tevent_trace_immediate_callback_t)trace_event_cb, tctx); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); im = tevent_create_immediate(tctx); assert_non_null(im); assert_false(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_schedule_immediate(im, tctx->ev, immediate_handler, tctx); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); assert_false(tctx->reattach_reset); tctx->handler_skipped = true; tctx->reattach_reset = true; tevent_schedule_immediate(im, tctx->ev, immediate_handler_reschedule, tctx); assert_false(tctx->reattach_reset); assert_false(tctx->handler_skipped); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_loop_once(tctx->ev); assert_false(tctx->reattach_reset); assert_true(tctx->attach); assert_false(tctx->before_handler); assert_false(tctx->handler_called); assert_false(tctx->detach); tevent_loop_once(tctx->ev); assert_true(tctx->attach); assert_true(tctx->before_handler); assert_true(tctx->handler_called); /* immediate events are self detaching */ assert_true(tctx->detach); } static void test_get_set_trace_fd_callback(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); tevent_trace_fd_callback_t cb; void *pvt; tevent_get_trace_fd_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); tevent_set_trace_fd_callback(tctx->ev, (tevent_trace_fd_callback_t)trace_event_cb, tctx); tevent_get_trace_fd_callback(tctx->ev, &cb, &pvt); assert_ptr_equal(cb, trace_event_cb); assert_ptr_equal(pvt, tctx); tevent_set_trace_fd_callback(tctx->ev, NULL, NULL); tevent_get_trace_fd_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); } static void test_get_set_trace_timer_callback(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); tevent_trace_timer_callback_t cb; void *pvt; tevent_get_trace_timer_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); tevent_set_trace_timer_callback(tctx->ev, (tevent_trace_timer_callback_t)trace_event_cb, tctx); tevent_get_trace_timer_callback(tctx->ev, &cb, &pvt); assert_ptr_equal(cb, trace_event_cb); assert_ptr_equal(pvt, tctx); tevent_set_trace_timer_callback(tctx->ev, NULL, NULL); tevent_get_trace_timer_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); } static void test_get_set_trace_signal_callback(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); tevent_trace_signal_callback_t cb; void *pvt; tevent_get_trace_signal_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); tevent_set_trace_signal_callback(tctx->ev, (tevent_trace_signal_callback_t)trace_event_cb, tctx); tevent_get_trace_signal_callback(tctx->ev, &cb, &pvt); assert_ptr_equal(cb, trace_event_cb); assert_ptr_equal(pvt, tctx); tevent_set_trace_signal_callback(tctx->ev, NULL, NULL); tevent_get_trace_signal_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); } static void test_get_set_trace_immediate_callback(void **state) { struct test_ctx *tctx = (struct test_ctx *)(*state); tevent_trace_immediate_callback_t cb; 
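/*
 * Illustrative sketch (not part of this test): because the get/set pairs
 * exercised in these tests return the currently installed callback and its
 * private data, a caller can chain onto an existing trace callback instead
 * of overwriting it. The names chained_cb, prev_cb, prev_pvt and
 * install_chained_cb are invented for this sketch.
 */
#include <stddef.h>
#include <tevent.h>

static tevent_trace_immediate_callback_t prev_cb;
static void *prev_pvt;

static void chained_cb(struct tevent_immediate *im,
                       enum tevent_event_trace_point point,
                       void *private_data)
{
        /* ... own bookkeeping would go here ... */

        if (prev_cb != NULL) {
                /* Forward to whatever was installed before us. */
                prev_cb(im, point, prev_pvt);
        }
}

static void install_chained_cb(struct tevent_context *ev)
{
        tevent_get_trace_immediate_callback(ev, &prev_cb, &prev_pvt);
        tevent_set_trace_immediate_callback(ev, chained_cb, NULL);
}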
void *pvt; tevent_get_trace_immediate_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); tevent_set_trace_immediate_callback(tctx->ev, (tevent_trace_immediate_callback_t)trace_event_cb, tctx); tevent_get_trace_immediate_callback(tctx->ev, &cb, &pvt); assert_ptr_equal(cb, trace_event_cb); assert_ptr_equal(pvt, tctx); tevent_set_trace_immediate_callback(tctx->ev, NULL, NULL); tevent_get_trace_immediate_callback(tctx->ev, &cb, &pvt); assert_null(cb); assert_null(pvt); } int main(int argc, char **argv) { const struct CMUnitTest tests[] = { cmocka_unit_test_setup_teardown(test_trace_event_fd__loop, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_fd__reset, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_fd__free, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_fd__free_in_handler, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_timer__loop, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_timer__reset, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_timer__free, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_timer__free_in_handler, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_signal__loop, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_signal__reset, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_signal__free, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_signal__free_in_handler, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_immediate__loop, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_immediate__reset, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_immediate__free, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_immediate__free_in_handler, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_trace_event_immediate__reschedule, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_get_set_trace_fd_callback, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_get_set_trace_timer_callback, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_get_set_trace_signal_callback, test_setup, test_teardown), cmocka_unit_test_setup_teardown(test_get_set_trace_immediate_callback, test_setup, test_teardown), }; cmocka_set_message_output(CM_OUTPUT_SUBUNIT); return cmocka_run_group_tests(tests, NULL, NULL); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.4581048 tevent-0.11.0/testsuite.c0000660000000000000000000012750100000000000015236 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. testing of the events subsystem Copyright (C) Stefan Metzmacher 2006-2009 Copyright (C) Jeremy Allison 2013 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "includes.h" #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "system/filesys.h" #include "system/select.h" #include "system/network.h" #include "torture/torture.h" #include "torture/local/proto.h" #ifdef HAVE_PTHREAD #include "system/threads.h" #include #endif static int fde_count; static void do_read(int fd, void *buf, size_t count) { ssize_t ret; do { ret = read(fd, buf, count); } while (ret == -1 && errno == EINTR); } static void fde_handler_read(struct tevent_context *ev_ctx, struct tevent_fd *f, uint16_t flags, void *private_data) { int *fd = (int *)private_data; char c; #ifdef SA_SIGINFO kill(getpid(), SIGUSR1); #endif kill(getpid(), SIGALRM); do_read(fd[0], &c, 1); fde_count++; } static void do_write(int fd, void *buf, size_t count) { ssize_t ret; do { ret = write(fd, buf, count); } while (ret == -1 && errno == EINTR); } static void fde_handler_write(struct tevent_context *ev_ctx, struct tevent_fd *f, uint16_t flags, void *private_data) { int *fd = (int *)private_data; char c = 0; do_write(fd[1], &c, 1); } /* This will only fire if the fd's returned from pipe() are bi-directional. */ static void fde_handler_read_1(struct tevent_context *ev_ctx, struct tevent_fd *f, uint16_t flags, void *private_data) { int *fd = (int *)private_data; char c; #ifdef SA_SIGINFO kill(getpid(), SIGUSR1); #endif kill(getpid(), SIGALRM); do_read(fd[1], &c, 1); fde_count++; } /* This will only fire if the fd's returned from pipe() are bi-directional. 
*/ static void fde_handler_write_1(struct tevent_context *ev_ctx, struct tevent_fd *f, uint16_t flags, void *private_data) { int *fd = (int *)private_data; char c = 0; do_write(fd[0], &c, 1); } static void finished_handler(struct tevent_context *ev_ctx, struct tevent_timer *te, struct timeval tval, void *private_data) { int *finished = (int *)private_data; (*finished) = 1; } static void count_handler(struct tevent_context *ev_ctx, struct tevent_signal *te, int signum, int count, void *info, void *private_data) { int *countp = (int *)private_data; (*countp) += count; } static bool test_event_context(struct torture_context *test, const void *test_data) { struct tevent_context *ev_ctx; int fd[2] = { -1, -1 }; const char *backend = (const char *)test_data; int alarm_count=0, info_count=0; struct tevent_fd *fde_read; struct tevent_fd *fde_read_1; struct tevent_fd *fde_write; struct tevent_fd *fde_write_1; #ifdef SA_RESTART struct tevent_signal *se1 = NULL; #endif #ifdef SA_RESETHAND struct tevent_signal *se2 = NULL; #endif #ifdef SA_SIGINFO struct tevent_signal *se3 = NULL; #endif int finished=0; struct timeval t; int ret; ev_ctx = tevent_context_init_byname(test, backend); if (ev_ctx == NULL) { torture_comment(test, "event backend '%s' not supported\n", backend); return true; } torture_comment(test, "backend '%s' - %s\n", backend, __FUNCTION__); /* reset globals */ fde_count = 0; /* create a pipe */ ret = pipe(fd); torture_assert_int_equal(test, ret, 0, "pipe failed"); fde_read = tevent_add_fd(ev_ctx, ev_ctx, fd[0], TEVENT_FD_READ, fde_handler_read, fd); fde_write_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[0], TEVENT_FD_WRITE, fde_handler_write_1, fd); fde_write = tevent_add_fd(ev_ctx, ev_ctx, fd[1], TEVENT_FD_WRITE, fde_handler_write, fd); fde_read_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[1], TEVENT_FD_READ, fde_handler_read_1, fd); tevent_fd_set_auto_close(fde_read); tevent_fd_set_auto_close(fde_write); tevent_add_timer(ev_ctx, ev_ctx, timeval_current_ofs(2,0), finished_handler, &finished); #ifdef SA_RESTART se1 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESTART, count_handler, &alarm_count); torture_assert(test, se1 != NULL, "failed to setup se1"); #endif #ifdef SA_RESETHAND se2 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESETHAND, count_handler, &alarm_count); torture_assert(test, se2 != NULL, "failed to setup se2"); #endif #ifdef SA_SIGINFO se3 = tevent_add_signal(ev_ctx, ev_ctx, SIGUSR1, SA_SIGINFO, count_handler, &info_count); torture_assert(test, se3 != NULL, "failed to setup se3"); #endif t = timeval_current(); while (!finished) { errno = 0; if (tevent_loop_once(ev_ctx) == -1) { TALLOC_FREE(ev_ctx); torture_fail(test, talloc_asprintf(test, "Failed event loop %s\n", strerror(errno))); return false; } } talloc_free(fde_read_1); talloc_free(fde_write_1); talloc_free(fde_read); talloc_free(fde_write); while (alarm_count < fde_count+1) { if (tevent_loop_once(ev_ctx) == -1) { break; } } torture_comment(test, "Got %.2f pipe events/sec\n", fde_count/timeval_elapsed(&t)); #ifdef SA_RESTART talloc_free(se1); #endif torture_assert_int_equal(test, alarm_count, 1+fde_count, "alarm count mismatch"); #ifdef SA_RESETHAND /* * we do not call talloc_free(se2) * because it is already gone, * after triggering the event handler. 
*/ #endif #ifdef SA_SIGINFO talloc_free(se3); torture_assert_int_equal(test, info_count, fde_count, "info count mismatch"); #endif talloc_free(ev_ctx); return true; } struct test_event_fd1_state { struct torture_context *tctx; const char *backend; struct tevent_context *ev; int sock[2]; struct tevent_timer *te; struct tevent_fd *fde0; struct tevent_fd *fde1; bool got_write; bool got_read; bool drain; bool drain_done; unsigned loop_count; bool finished; const char *error; }; static void test_event_fd1_fde_handler(struct tevent_context *ev_ctx, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct test_event_fd1_state *state = (struct test_event_fd1_state *)private_data; if (state->drain_done) { state->finished = true; state->error = __location__; return; } if (state->drain) { ssize_t ret; uint8_t c = 0; if (!(flags & TEVENT_FD_READ)) { state->finished = true; state->error = __location__; return; } ret = read(state->sock[0], &c, 1); if (ret == 1) { return; } /* * end of test... */ tevent_fd_set_flags(fde, 0); state->drain_done = true; return; } if (!state->got_write) { uint8_t c = 0; if (flags != TEVENT_FD_WRITE) { state->finished = true; state->error = __location__; return; } state->got_write = true; /* * we write to the other socket... */ do_write(state->sock[1], &c, 1); TEVENT_FD_NOT_WRITEABLE(fde); TEVENT_FD_READABLE(fde); return; } if (!state->got_read) { if (flags != TEVENT_FD_READ) { state->finished = true; state->error = __location__; return; } state->got_read = true; TEVENT_FD_NOT_READABLE(fde); return; } state->finished = true; state->error = __location__; return; } static void test_event_fd1_finished(struct tevent_context *ev_ctx, struct tevent_timer *te, struct timeval tval, void *private_data) { struct test_event_fd1_state *state = (struct test_event_fd1_state *)private_data; if (state->drain_done) { state->finished = true; return; } if (!state->got_write) { state->finished = true; state->error = __location__; return; } if (!state->got_read) { state->finished = true; state->error = __location__; return; } state->loop_count++; if (state->loop_count > 3) { state->finished = true; state->error = __location__; return; } state->got_write = false; state->got_read = false; tevent_fd_set_flags(state->fde0, TEVENT_FD_WRITE); if (state->loop_count > 2) { state->drain = true; TALLOC_FREE(state->fde1); TEVENT_FD_READABLE(state->fde0); } state->te = tevent_add_timer(state->ev, state->ev, timeval_current_ofs(0,2000), test_event_fd1_finished, state); } static bool test_event_fd1(struct torture_context *tctx, const void *test_data) { struct test_event_fd1_state state; int ret; ZERO_STRUCT(state); state.tctx = tctx; state.backend = (const char *)test_data; state.ev = tevent_context_init_byname(tctx, state.backend); if (state.ev == NULL) { torture_skip(tctx, talloc_asprintf(tctx, "event backend '%s' not supported\n", state.backend)); return true; } tevent_set_debug_stderr(state.ev); torture_comment(tctx, "backend '%s' - %s\n", state.backend, __FUNCTION__); /* * This tests the following: * * It monitors the state of state.sock[0] * with tevent_fd, but we never read/write on state.sock[0] * while state.sock[1] * is only used to write a few bytes. 
* * We have a loop: * - we wait only for TEVENT_FD_WRITE on state.sock[0] * - we write 1 byte to state.sock[1] * - we wait only for TEVENT_FD_READ on state.sock[0] * - we disable events on state.sock[0] * - the timer event restarts the loop * Then we close state.sock[1] * We have a loop: * - we wait for TEVENT_FD_READ/WRITE on state.sock[0] * - we try to read 1 byte * - if the read gets an error of returns 0 * we disable the event handler * - the timer finishes the test */ state.sock[0] = -1; state.sock[1] = -1; ret = socketpair(AF_UNIX, SOCK_STREAM, 0, state.sock); torture_assert(tctx, ret == 0, "socketpair() failed"); state.te = tevent_add_timer(state.ev, state.ev, timeval_current_ofs(0,10000), test_event_fd1_finished, &state); state.fde0 = tevent_add_fd(state.ev, state.ev, state.sock[0], TEVENT_FD_WRITE, test_event_fd1_fde_handler, &state); /* state.fde1 is only used to auto close */ state.fde1 = tevent_add_fd(state.ev, state.ev, state.sock[1], 0, test_event_fd1_fde_handler, &state); tevent_fd_set_auto_close(state.fde0); tevent_fd_set_auto_close(state.fde1); while (!state.finished) { errno = 0; if (tevent_loop_once(state.ev) == -1) { talloc_free(state.ev); torture_fail(tctx, talloc_asprintf(tctx, "Failed event loop %s\n", strerror(errno))); } } talloc_free(state.ev); torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx, "%s", state.error)); return true; } struct test_event_fd2_state { struct torture_context *tctx; const char *backend; struct tevent_context *ev; struct tevent_timer *te; struct test_event_fd2_sock { struct test_event_fd2_state *state; int fd; struct tevent_fd *fde; size_t num_written; size_t num_read; bool got_full; } sock0, sock1; bool finished; const char *error; }; static void test_event_fd2_sock_handler(struct tevent_context *ev_ctx, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct test_event_fd2_sock *cur_sock = (struct test_event_fd2_sock *)private_data; struct test_event_fd2_state *state = cur_sock->state; struct test_event_fd2_sock *oth_sock = NULL; uint8_t v = 0, c; ssize_t ret; if (cur_sock == &state->sock0) { oth_sock = &state->sock1; } else { oth_sock = &state->sock0; } if (oth_sock->num_written == 1) { if (flags != (TEVENT_FD_READ | TEVENT_FD_WRITE)) { state->finished = true; state->error = __location__; return; } } if (cur_sock->num_read == oth_sock->num_written) { state->finished = true; state->error = __location__; return; } if (!(flags & TEVENT_FD_READ)) { state->finished = true; state->error = __location__; return; } if (oth_sock->num_read >= PIPE_BUF) { /* * On Linux we become writable once we've read * one byte. On Solaris we only become writable * again once we've read 4096 bytes. PIPE_BUF * is probably a safe bet to test against. 
* * There should be room to write a byte again */ if (!(flags & TEVENT_FD_WRITE)) { state->finished = true; state->error = __location__; return; } } if ((flags & TEVENT_FD_WRITE) && !cur_sock->got_full) { v = (uint8_t)cur_sock->num_written; ret = write(cur_sock->fd, &v, 1); if (ret != 1) { state->finished = true; state->error = __location__; return; } cur_sock->num_written++; if (cur_sock->num_written > 0x80000000) { state->finished = true; state->error = __location__; return; } return; } if (!cur_sock->got_full) { cur_sock->got_full = true; if (!oth_sock->got_full) { /* * cur_sock is full, * lets wait for oth_sock * to be filled */ tevent_fd_set_flags(cur_sock->fde, 0); return; } /* * oth_sock waited for cur_sock, * lets restart it */ tevent_fd_set_flags(oth_sock->fde, TEVENT_FD_READ|TEVENT_FD_WRITE); } ret = read(cur_sock->fd, &v, 1); if (ret != 1) { state->finished = true; state->error = __location__; return; } c = (uint8_t)cur_sock->num_read; if (c != v) { state->finished = true; state->error = __location__; return; } cur_sock->num_read++; if (cur_sock->num_read < oth_sock->num_written) { /* there is more to read */ return; } /* * we read everything, we need to remove TEVENT_FD_WRITE * to avoid spinning */ TEVENT_FD_NOT_WRITEABLE(cur_sock->fde); if (oth_sock->num_read == cur_sock->num_written) { /* * both directions are finished */ state->finished = true; } return; } static void test_event_fd2_finished(struct tevent_context *ev_ctx, struct tevent_timer *te, struct timeval tval, void *private_data) { struct test_event_fd2_state *state = (struct test_event_fd2_state *)private_data; /* * this should never be triggered */ state->finished = true; state->error = __location__; } static bool test_event_fd2(struct torture_context *tctx, const void *test_data) { struct test_event_fd2_state state; int sock[2]; uint8_t c = 0; ZERO_STRUCT(state); state.tctx = tctx; state.backend = (const char *)test_data; state.ev = tevent_context_init_byname(tctx, state.backend); if (state.ev == NULL) { torture_skip(tctx, talloc_asprintf(tctx, "event backend '%s' not supported\n", state.backend)); return true; } tevent_set_debug_stderr(state.ev); torture_comment(tctx, "backend '%s' - %s\n", state.backend, __FUNCTION__); /* * This tests the following * * - We write 1 byte to each socket * - We wait for TEVENT_FD_READ/WRITE on both sockets * - When we get TEVENT_FD_WRITE we write 1 byte * until both socket buffers are full, which * means both sockets only get TEVENT_FD_READ. * - Then we read 1 byte until we have consumed * all bytes the other end has written. 
*/ sock[0] = -1; sock[1] = -1; socketpair(AF_UNIX, SOCK_STREAM, 0, sock); /* * the timer should never expire */ state.te = tevent_add_timer(state.ev, state.ev, timeval_current_ofs(600, 0), test_event_fd2_finished, &state); state.sock0.state = &state; state.sock0.fd = sock[0]; state.sock0.fde = tevent_add_fd(state.ev, state.ev, state.sock0.fd, TEVENT_FD_READ | TEVENT_FD_WRITE, test_event_fd2_sock_handler, &state.sock0); state.sock1.state = &state; state.sock1.fd = sock[1]; state.sock1.fde = tevent_add_fd(state.ev, state.ev, state.sock1.fd, TEVENT_FD_READ | TEVENT_FD_WRITE, test_event_fd2_sock_handler, &state.sock1); tevent_fd_set_auto_close(state.sock0.fde); tevent_fd_set_auto_close(state.sock1.fde); do_write(state.sock0.fd, &c, 1); state.sock0.num_written++; do_write(state.sock1.fd, &c, 1); state.sock1.num_written++; while (!state.finished) { errno = 0; if (tevent_loop_once(state.ev) == -1) { talloc_free(state.ev); torture_fail(tctx, talloc_asprintf(tctx, "Failed event loop %s\n", strerror(errno))); } } talloc_free(state.ev); torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx, "%s", state.error)); return true; } struct test_wrapper_state { struct torture_context *tctx; int num_events; int num_wrap_handlers; }; static bool test_wrapper_before_use(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; return true; } static void test_wrapper_after_use(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_before_fd_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_fd *fde, uint16_t flags, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_after_fd_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_fd *fde, uint16_t flags, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_before_timer_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_timer *te, struct timeval requested_time, struct timeval trigger_time, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_after_timer_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_timer *te, struct timeval requested_time, struct timeval trigger_time, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); 
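/*
 * Illustrative sketch (not part of this test): the before_ and after_ hooks
 * in this block bracket every handler dispatched through the wrapper
 * context, and the before_use()/after_use() pair additionally brackets
 * explicit tevent_context_push_use()/tevent_context_pop_use() sections, as
 * test_wrapper() below demonstrates. A caller-side helper might look like
 * this; run_under_wrapper is an invented name.
 */
#include <stdbool.h>
#include <tevent.h>

static bool run_under_wrapper(struct tevent_context *wrap_ev,
                              void (*work)(void *), void *data)
{
        /* Invokes the wrapper's before_use() hook (which may refuse). */
        if (!tevent_context_push_use(wrap_ev)) {
                return false;
        }
        work(data);
        /* Invokes the wrapper's after_use() hook. */
        tevent_context_pop_use(wrap_ev);
        return true;
}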
state->num_wrap_handlers++; } static void test_wrapper_before_immediate_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_immediate *im, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_after_immediate_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_immediate *im, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_before_signal_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_signal *se, int signum, int count, void *siginfo, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static void test_wrapper_after_signal_handler(struct tevent_context *wrap_ev, void *private_data, struct tevent_context *main_ev, struct tevent_signal *se, int signum, int count, void *siginfo, const char *handler_name, const char *location) { struct test_wrapper_state *state = talloc_get_type_abort(private_data, struct test_wrapper_state); torture_comment(state->tctx, "%s\n", __func__); state->num_wrap_handlers++; } static const struct tevent_wrapper_ops test_wrapper_ops = { .name = "test_wrapper", .before_use = test_wrapper_before_use, .after_use = test_wrapper_after_use, .before_fd_handler = test_wrapper_before_fd_handler, .after_fd_handler = test_wrapper_after_fd_handler, .before_timer_handler = test_wrapper_before_timer_handler, .after_timer_handler = test_wrapper_after_timer_handler, .before_immediate_handler = test_wrapper_before_immediate_handler, .after_immediate_handler = test_wrapper_after_immediate_handler, .before_signal_handler = test_wrapper_before_signal_handler, .after_signal_handler = test_wrapper_after_signal_handler, }; static void test_wrapper_timer_handler(struct tevent_context *ev, struct tevent_timer *te, struct timeval tv, void *private_data) { struct test_wrapper_state *state = (struct test_wrapper_state *)private_data; torture_comment(state->tctx, "timer handler\n"); state->num_events++; talloc_free(te); return; } static void test_wrapper_fd_handler(struct tevent_context *ev, struct tevent_fd *fde, unsigned short fd_flags, void *private_data) { struct test_wrapper_state *state = (struct test_wrapper_state *)private_data; torture_comment(state->tctx, "fd handler\n"); state->num_events++; talloc_free(fde); return; } static void test_wrapper_immediate_handler(struct tevent_context *ev, struct tevent_immediate *im, void *private_data) { struct test_wrapper_state *state = (struct test_wrapper_state *)private_data; state->num_events++; talloc_free(im); torture_comment(state->tctx, "immediate handler\n"); return; } static void test_wrapper_signal_handler(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { struct test_wrapper_state *state = (struct test_wrapper_state *)private_data; torture_comment(state->tctx, "signal handler\n"); state->num_events++; talloc_free(se); return; } static bool 
test_wrapper(struct torture_context *tctx, const void *test_data) { struct test_wrapper_state *state = NULL; int sock[2] = { -1, -1}; uint8_t c = 0; const int num_events = 4; const char *backend = (const char *)test_data; struct tevent_context *ev = NULL; struct tevent_context *wrap_ev = NULL; struct tevent_fd *fde = NULL; struct tevent_timer *te = NULL; struct tevent_signal *se = NULL; struct tevent_immediate *im = NULL; int ret; bool ok = false; bool ret2; ev = tevent_context_init_byname(tctx, backend); if (ev == NULL) { torture_skip(tctx, talloc_asprintf(tctx, "event backend '%s' not supported\n", backend)); return true; } tevent_set_debug_stderr(ev); torture_comment(tctx, "tevent backend '%s'\n", backend); wrap_ev = tevent_context_wrapper_create( ev, ev, &test_wrapper_ops, &state, struct test_wrapper_state); torture_assert_not_null_goto(tctx, wrap_ev, ok, done, "tevent_context_wrapper_create failed\n"); *state = (struct test_wrapper_state) { .tctx = tctx, }; ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sock); torture_assert_goto(tctx, ret == 0, ok, done, "socketpair failed\n"); te = tevent_add_timer(wrap_ev, wrap_ev, timeval_current_ofs(0, 0), test_wrapper_timer_handler, state); torture_assert_not_null_goto(tctx, te, ok, done, "tevent_add_timer failed\n"); fde = tevent_add_fd(wrap_ev, wrap_ev, sock[1], TEVENT_FD_READ, test_wrapper_fd_handler, state); torture_assert_not_null_goto(tctx, fde, ok, done, "tevent_add_fd failed\n"); im = tevent_create_immediate(wrap_ev); torture_assert_not_null_goto(tctx, im, ok, done, "tevent_create_immediate failed\n"); se = tevent_add_signal(wrap_ev, wrap_ev, SIGUSR1, 0, test_wrapper_signal_handler, state); torture_assert_not_null_goto(tctx, se, ok, done, "tevent_add_signal failed\n"); do_write(sock[0], &c, 1); kill(getpid(), SIGUSR1); tevent_schedule_immediate(im, wrap_ev, test_wrapper_immediate_handler, state); ret2 = tevent_context_push_use(wrap_ev); torture_assert_goto(tctx, ret2, ok, done, "tevent_context_push_use(wrap_ev) failed\n"); ret2 = tevent_context_push_use(ev); torture_assert_goto(tctx, ret2, ok, pop_use, "tevent_context_push_use(ev) failed\n"); tevent_context_pop_use(ev); tevent_context_pop_use(wrap_ev); ret = tevent_loop_wait(ev); torture_assert_int_equal_goto(tctx, ret, 0, ok, done, "tevent_loop_wait failed\n"); torture_comment(tctx, "Num events: %d\n", state->num_events); torture_comment(tctx, "Num wrap handlers: %d\n", state->num_wrap_handlers); torture_assert_int_equal_goto(tctx, state->num_events, num_events, ok, done, "Wrong event count\n"); torture_assert_int_equal_goto(tctx, state->num_wrap_handlers, num_events*2+2, ok, done, "Wrong wrapper count\n"); ok = true; done: TALLOC_FREE(wrap_ev); TALLOC_FREE(ev); if (sock[0] != -1) { close(sock[0]); } if (sock[1] != -1) { close(sock[1]); } return ok; pop_use: tevent_context_pop_use(wrap_ev); goto done; } static void test_free_wrapper_signal_handler(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data) { struct torture_context *tctx = talloc_get_type_abort(private_data, struct torture_context); torture_comment(tctx, "signal handler\n"); talloc_free(se); /* * signal handlers have highest priority in tevent, so this signal * handler will always be started before the other handlers * below. Freeing the (wrapper) event context here tests that the * wrapper implementation correclty handles the wrapper ev going away * with pending events. 
*/ talloc_free(ev); return; } static void test_free_wrapper_fd_handler(struct tevent_context *ev, struct tevent_fd *fde, unsigned short fd_flags, void *private_data) { /* * This should never be called as * test_free_wrapper_signal_handler() * already destroyed the wrapper tevent_context. */ abort(); } static void test_free_wrapper_immediate_handler(struct tevent_context *ev, struct tevent_immediate *im, void *private_data) { /* * This should never be called as * test_free_wrapper_signal_handler() * already destroyed the wrapper tevent_context. */ abort(); } static void test_free_wrapper_timer_handler(struct tevent_context *ev, struct tevent_timer *te, struct timeval tv, void *private_data) { /* * This should never be called as * test_free_wrapper_signal_handler() * already destroyed the wrapper tevent_context. */ abort(); } static bool test_free_wrapper(struct torture_context *tctx, const void *test_data) { struct test_wrapper_state *state = NULL; int sock[2] = { -1, -1}; uint8_t c = 0; const char *backend = (const char *)test_data; TALLOC_CTX *frame = talloc_stackframe(); struct tevent_context *ev = NULL; struct tevent_context *wrap_ev = NULL; struct tevent_fd *fde = NULL; struct tevent_timer *te = NULL; struct tevent_signal *se = NULL; struct tevent_immediate *im = NULL; int ret; bool ok = false; ev = tevent_context_init_byname(frame, backend); if (ev == NULL) { torture_skip(tctx, talloc_asprintf(tctx, "event backend '%s' not supported\n", backend)); return true; } tevent_set_debug_stderr(ev); torture_comment(tctx, "tevent backend '%s'\n", backend); wrap_ev = tevent_context_wrapper_create( ev, ev, &test_wrapper_ops, &state, struct test_wrapper_state); torture_assert_not_null_goto(tctx, wrap_ev, ok, done, "tevent_context_wrapper_create failed\n"); *state = (struct test_wrapper_state) { .tctx = tctx, }; ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sock); torture_assert_goto(tctx, ret == 0, ok, done, "socketpair failed\n"); fde = tevent_add_fd(wrap_ev, frame, sock[1], TEVENT_FD_READ, test_free_wrapper_fd_handler, NULL); torture_assert_not_null_goto(tctx, fde, ok, done, "tevent_add_fd failed\n"); te = tevent_add_timer(wrap_ev, frame, timeval_current_ofs(0, 0), test_free_wrapper_timer_handler, NULL); torture_assert_not_null_goto(tctx, te, ok, done, "tevent_add_timer failed\n"); im = tevent_create_immediate(frame); torture_assert_not_null_goto(tctx, im, ok, done, "tevent_create_immediate failed\n"); se = tevent_add_signal(wrap_ev, frame, SIGUSR1, 0, test_free_wrapper_signal_handler, tctx); torture_assert_not_null_goto(tctx, se, ok, done, "tevent_add_signal failed\n"); do_write(sock[0], &c, 1); kill(getpid(), SIGUSR1); tevent_schedule_immediate(im, wrap_ev, test_free_wrapper_immediate_handler, NULL); ret = tevent_loop_wait(ev); torture_assert_goto(tctx, ret == 0, ok, done, "tevent_loop_wait failed\n"); ok = true; done: TALLOC_FREE(frame); if (sock[0] != -1) { close(sock[0]); } if (sock[1] != -1) { close(sock[1]); } return ok; } #ifdef HAVE_PTHREAD static pthread_mutex_t threaded_mutex = PTHREAD_MUTEX_INITIALIZER; static bool do_shutdown = false; static void test_event_threaded_lock(void) { int ret; ret = pthread_mutex_lock(&threaded_mutex); assert(ret == 0); } static void test_event_threaded_unlock(void) { int ret; ret = pthread_mutex_unlock(&threaded_mutex); assert(ret == 0); } static void test_event_threaded_trace(enum tevent_trace_point point, void *private_data) { switch (point) { case TEVENT_TRACE_BEFORE_WAIT: test_event_threaded_unlock(); break; case TEVENT_TRACE_AFTER_WAIT: 
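/*
 * Re-acquire the mutex as soon as the backend stops waiting; together with
 * the unlock at TEVENT_TRACE_BEFORE_WAIT below, this means the main test
 * thread can only touch the event context while the poll thread is blocked
 * inside the backend's wait call.
 */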
test_event_threaded_lock(); break; case TEVENT_TRACE_BEFORE_LOOP_ONCE: case TEVENT_TRACE_AFTER_LOOP_ONCE: break; } } static void test_event_threaded_timer(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data) { return; } static void *test_event_poll_thread(void *private_data) { struct tevent_context *ev = (struct tevent_context *)private_data; test_event_threaded_lock(); while (true) { int ret; ret = tevent_loop_once(ev); assert(ret == 0); if (do_shutdown) { test_event_threaded_unlock(); return NULL; } } } static void test_event_threaded_read_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data) { int *pfd = (int *)private_data; char c; ssize_t nread; if ((flags & TEVENT_FD_READ) == 0) { return; } do { nread = read(*pfd, &c, 1); } while ((nread == -1) && (errno == EINTR)); assert(nread == 1); } static bool test_event_context_threaded(struct torture_context *test, const void *test_data) { struct tevent_context *ev; struct tevent_timer *te; struct tevent_fd *fde; pthread_t poll_thread; int fds[2]; int ret; char c = 0; ev = tevent_context_init_byname(test, "poll_mt"); torture_assert(test, ev != NULL, "poll_mt not supported"); tevent_set_trace_callback(ev, test_event_threaded_trace, NULL); te = tevent_add_timer(ev, ev, timeval_current_ofs(5, 0), test_event_threaded_timer, NULL); torture_assert(test, te != NULL, "Could not add timer"); ret = pthread_create(&poll_thread, NULL, test_event_poll_thread, ev); torture_assert(test, ret == 0, "Could not create poll thread"); ret = pipe(fds); torture_assert(test, ret == 0, "Could not create pipe"); poll(NULL, 0, 100); test_event_threaded_lock(); fde = tevent_add_fd(ev, ev, fds[0], TEVENT_FD_READ, test_event_threaded_read_handler, &fds[0]); torture_assert(test, fde != NULL, "Could not add fd event"); test_event_threaded_unlock(); poll(NULL, 0, 100); do_write(fds[1], &c, 1); poll(NULL, 0, 100); test_event_threaded_lock(); do_shutdown = true; test_event_threaded_unlock(); do_write(fds[1], &c, 1); ret = pthread_join(poll_thread, NULL); torture_assert(test, ret == 0, "pthread_join failed"); return true; } #define NUM_TEVENT_THREADS 100 /* Ugly, but needed for torture_comment... */ static struct torture_context *thread_test_ctx; static pthread_t thread_map[NUM_TEVENT_THREADS]; static unsigned thread_counter; /* Called in master thread context */ static void callback_nowait(struct tevent_context *ev, struct tevent_immediate *im, void *private_ptr) { pthread_t *thread_id_ptr = talloc_get_type_abort(private_ptr, pthread_t); unsigned i; for (i = 0; i < NUM_TEVENT_THREADS; i++) { if (pthread_equal(*thread_id_ptr, thread_map[i])) { break; } } torture_comment(thread_test_ctx, "Callback %u from thread %u\n", thread_counter, i); thread_counter++; } /* Blast the master tevent_context with a callback, no waiting. 
*/ static void *thread_fn_nowait(void *private_ptr) { struct tevent_thread_proxy *master_tp = talloc_get_type_abort(private_ptr, struct tevent_thread_proxy); struct tevent_immediate *im; pthread_t *thread_id_ptr; im = tevent_create_immediate(NULL); if (im == NULL) { return NULL; } thread_id_ptr = talloc(NULL, pthread_t); if (thread_id_ptr == NULL) { return NULL; } *thread_id_ptr = pthread_self(); tevent_thread_proxy_schedule(master_tp, &im, callback_nowait, &thread_id_ptr); return NULL; } static void timeout_fn(struct tevent_context *ev, struct tevent_timer *te, struct timeval tv, void *p) { thread_counter = NUM_TEVENT_THREADS * 10; } static bool test_multi_tevent_threaded(struct torture_context *test, const void *test_data) { unsigned i; struct tevent_context *master_ev; struct tevent_thread_proxy *tp; talloc_disable_null_tracking(); /* Ugly global stuff. */ thread_test_ctx = test; thread_counter = 0; master_ev = tevent_context_init(NULL); if (master_ev == NULL) { return false; } tevent_set_debug_stderr(master_ev); tp = tevent_thread_proxy_create(master_ev); if (tp == NULL) { torture_fail(test, talloc_asprintf(test, "tevent_thread_proxy_create failed\n")); talloc_free(master_ev); return false; } for (i = 0; i < NUM_TEVENT_THREADS; i++) { int ret = pthread_create(&thread_map[i], NULL, thread_fn_nowait, tp); if (ret != 0) { torture_fail(test, talloc_asprintf(test, "Failed to create thread %i, %d\n", i, ret)); return false; } } /* Ensure we don't wait more than 10 seconds. */ tevent_add_timer(master_ev, master_ev, timeval_current_ofs(10,0), timeout_fn, NULL); while (thread_counter < NUM_TEVENT_THREADS) { int ret = tevent_loop_once(master_ev); torture_assert(test, ret == 0, "tevent_loop_once failed"); } torture_assert(test, thread_counter == NUM_TEVENT_THREADS, "thread_counter fail\n"); talloc_free(master_ev); return true; } struct reply_state { struct tevent_thread_proxy *reply_tp; pthread_t thread_id; int *p_finished; }; static void thread_timeout_fn(struct tevent_context *ev, struct tevent_timer *te, struct timeval tv, void *p) { int *p_finished = (int *)p; *p_finished = 2; } /* Called in child-thread context */ static void thread_callback(struct tevent_context *ev, struct tevent_immediate *im, void *private_ptr) { struct reply_state *rsp = talloc_get_type_abort(private_ptr, struct reply_state); talloc_steal(ev, rsp); *rsp->p_finished = 1; } /* Called in master thread context */ static void master_callback(struct tevent_context *ev, struct tevent_immediate *im, void *private_ptr) { struct reply_state *rsp = talloc_get_type_abort(private_ptr, struct reply_state); unsigned i; talloc_steal(ev, rsp); for (i = 0; i < NUM_TEVENT_THREADS; i++) { if (pthread_equal(rsp->thread_id, thread_map[i])) { break; } } torture_comment(thread_test_ctx, "Callback %u from thread %u\n", thread_counter, i); /* Now reply to the thread ! 
*/ tevent_thread_proxy_schedule(rsp->reply_tp, &im, thread_callback, &rsp); thread_counter++; } static void *thread_fn_1(void *private_ptr) { struct tevent_thread_proxy *master_tp = talloc_get_type_abort(private_ptr, struct tevent_thread_proxy); struct tevent_thread_proxy *tp; struct tevent_immediate *im; struct tevent_context *ev; struct reply_state *rsp; int finished = 0; int ret; ev = tevent_context_init(NULL); if (ev == NULL) { return NULL; } tp = tevent_thread_proxy_create(ev); if (tp == NULL) { talloc_free(ev); return NULL; } im = tevent_create_immediate(ev); if (im == NULL) { talloc_free(ev); return NULL; } rsp = talloc(ev, struct reply_state); if (rsp == NULL) { talloc_free(ev); return NULL; } rsp->thread_id = pthread_self(); rsp->reply_tp = tp; rsp->p_finished = &finished; /* Introduce a little randomness into the mix.. */ usleep(random() % 7000); tevent_thread_proxy_schedule(master_tp, &im, master_callback, &rsp); /* Ensure we don't wait more than 10 seconds. */ tevent_add_timer(ev, ev, timeval_current_ofs(10,0), thread_timeout_fn, &finished); while (finished == 0) { ret = tevent_loop_once(ev); assert(ret == 0); } if (finished > 1) { /* Timeout ! */ abort(); } /* * NB. We should talloc_free(ev) here, but if we do * we currently get hit by helgrind Fix #323432 * "When calling pthread_cond_destroy or pthread_mutex_destroy * with initializers as argument Helgrind (incorrectly) reports errors." * * http://valgrind.10908.n7.nabble.com/Helgrind-3-9-0-false-positive- * with-pthread-mutex-destroy-td47757.html * * Helgrind doesn't understand that the request/reply * messages provide synchronization between the lock/unlock * in tevent_thread_proxy_schedule(), and the pthread_destroy() * when the struct tevent_thread_proxy object is talloc_free'd. * * As a work-around for now return ev for the parent thread to free. */ return ev; } static bool test_multi_tevent_threaded_1(struct torture_context *test, const void *test_data) { unsigned i; struct tevent_context *master_ev; struct tevent_thread_proxy *master_tp; int ret; talloc_disable_null_tracking(); /* Ugly global stuff. */ thread_test_ctx = test; thread_counter = 0; master_ev = tevent_context_init(NULL); if (master_ev == NULL) { return false; } tevent_set_debug_stderr(master_ev); master_tp = tevent_thread_proxy_create(master_ev); if (master_tp == NULL) { torture_fail(test, talloc_asprintf(test, "tevent_thread_proxy_create failed\n")); talloc_free(master_ev); return false; } for (i = 0; i < NUM_TEVENT_THREADS; i++) { ret = pthread_create(&thread_map[i], NULL, thread_fn_1, master_tp); if (ret != 0) { torture_fail(test, talloc_asprintf(test, "Failed to create thread %i, %d\n", i, ret)); return false; } } while (thread_counter < NUM_TEVENT_THREADS) { ret = tevent_loop_once(master_ev); torture_assert(test, ret == 0, "tevent_loop_once failed"); } /* Wait for all the threads to finish - join 'em. */ for (i = 0; i < NUM_TEVENT_THREADS; i++) { void *retval; ret = pthread_join(thread_map[i], &retval); torture_assert(test, ret == 0, "pthread_join failed"); /* Free the child thread event context. 
*/ talloc_free(retval); } talloc_free(master_ev); return true; } struct threaded_test_2 { struct tevent_threaded_context *tctx; struct tevent_immediate *im; pthread_t thread_id; }; static void master_callback_2(struct tevent_context *ev, struct tevent_immediate *im, void *private_data); static void *thread_fn_2(void *private_data) { struct threaded_test_2 *state = private_data; state->thread_id = pthread_self(); usleep(random() % 7000); tevent_threaded_schedule_immediate( state->tctx, state->im, master_callback_2, state); return NULL; } static void master_callback_2(struct tevent_context *ev, struct tevent_immediate *im, void *private_data) { struct threaded_test_2 *state = private_data; int i; for (i = 0; i < NUM_TEVENT_THREADS; i++) { if (pthread_equal(state->thread_id, thread_map[i])) { break; } } torture_comment(thread_test_ctx, "Callback_2 %u from thread %u\n", thread_counter, i); thread_counter++; } static bool test_multi_tevent_threaded_2(struct torture_context *test, const void *test_data) { unsigned i; struct tevent_context *ev; struct tevent_threaded_context *tctx; int ret; thread_test_ctx = test; thread_counter = 0; ev = tevent_context_init(test); torture_assert(test, ev != NULL, "tevent_context_init failed"); /* * tevent_re_initialise used to have a bug where it did not * re-initialise the thread support after taking it * down. Exercise that code path. */ ret = tevent_re_initialise(ev); torture_assert(test, ret == 0, "tevent_re_initialise failed"); tctx = tevent_threaded_context_create(ev, ev); torture_assert(test, tctx != NULL, "tevent_threaded_context_create failed"); for (i=0; i < NUM_TEVENT_THREADS; i++) { struct threaded_test_2 *state = talloc(ev, struct threaded_test_2); torture_assert(test, state != NULL, "talloc failed"); state->tctx = tctx; state->im = tevent_create_immediate(state); torture_assert(test, state->im != NULL, "tevent_create_immediate failed"); ret = pthread_create(&thread_map[i], NULL, thread_fn_2, state); torture_assert(test, ret == 0, "pthread_create failed"); } while (thread_counter < NUM_TEVENT_THREADS) { ret = tevent_loop_once(ev); torture_assert(test, ret == 0, "tevent_loop_once failed"); } /* Wait for all the threads to finish - join 'em. */ for (i = 0; i < NUM_TEVENT_THREADS; i++) { void *retval; ret = pthread_join(thread_map[i], &retval); torture_assert(test, ret == 0, "pthread_join failed"); /* Free the child thread event context.
*/ } talloc_free(tctx); talloc_free(ev); return true; } #endif struct torture_suite *torture_local_event(TALLOC_CTX *mem_ctx) { struct torture_suite *suite = torture_suite_create(mem_ctx, "event"); const char **list = tevent_backend_list(suite); int i; for (i=0;list && list[i];i++) { struct torture_suite *backend_suite; backend_suite = torture_suite_create(mem_ctx, list[i]); torture_suite_add_simple_tcase_const(backend_suite, "context", test_event_context, (const void *)list[i]); torture_suite_add_simple_tcase_const(backend_suite, "fd1", test_event_fd1, (const void *)list[i]); torture_suite_add_simple_tcase_const(backend_suite, "fd2", test_event_fd2, (const void *)list[i]); torture_suite_add_simple_tcase_const(backend_suite, "wrapper", test_wrapper, (const void *)list[i]); torture_suite_add_simple_tcase_const(backend_suite, "free_wrapper", test_free_wrapper, (const void *)list[i]); torture_suite_add_suite(suite, backend_suite); } #ifdef HAVE_PTHREAD torture_suite_add_simple_tcase_const(suite, "threaded_poll_mt", test_event_context_threaded, NULL); torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded", test_multi_tevent_threaded, NULL); torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded_1", test_multi_tevent_threaded_1, NULL); torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded_2", test_multi_tevent_threaded_2, NULL); #endif return suite; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent.c0000660000000000000000000005654000000000000014516 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. main select loop and event handling Copyright (C) Andrew Tridgell 2003 Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ /* PLEASE READ THIS BEFORE MODIFYING! This module is a general abstraction for the main select loop and event handling. Do not ever put any localised hacks in here, instead register one of the possible event types and implement that event somewhere else. There are 2 types of event handling that are handled in this module: 1) a file descriptor becoming readable or writeable. This is mostly used for network sockets, but can be used for any type of file descriptor. You may only register one handler for each file descriptor/io combination or you will get unpredictable results (this means that you can have a handler for read events, and a separate handler for write events, but not two handlers that are both handling read events) 2) a timed event. You can register an event that happens at a specific time. You can register as many of these as you like. They are single shot - add a new timed event in the event handler to get another event. 
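   As an illustrative sketch (not part of this file), a recurring
   one-second tick can be built by re-arming the timer from inside its
   own handler:

     static void tick_handler(struct tevent_context *ev,
                              struct tevent_timer *te,
                              struct timeval current_time,
                              void *private_data)
     {
             tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
                              tick_handler, private_data);
     }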
To setup a set of events you first need to create an event_context structure using the function tevent_context_init(); This returns a 'struct tevent_context' that you use in all subsequent calls. After that you can add/remove events that you are interested in using tevent_add_*() and talloc_free(). Finally, you call tevent_loop_once() to block waiting for one of the events to occur, or tevent_loop_wait() which will loop forever. */ #include "replace.h" #include "system/filesys.h" #ifdef HAVE_PTHREAD #include "system/threads.h" #endif #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" #ifdef HAVE_EVENTFD #include <sys/eventfd.h> #endif struct tevent_ops_list { struct tevent_ops_list *next, *prev; const char *name; const struct tevent_ops *ops; }; /* list of registered event backends */ static struct tevent_ops_list *tevent_backends = NULL; static char *tevent_default_backend = NULL; /* register an events backend */ bool tevent_register_backend(const char *name, const struct tevent_ops *ops) { struct tevent_ops_list *e; for (e = tevent_backends; e != NULL; e = e->next) { if (0 == strcmp(e->name, name)) { /* already registered, skip it */ return true; } } e = talloc(NULL, struct tevent_ops_list); if (e == NULL) return false; e->name = name; e->ops = ops; DLIST_ADD(tevent_backends, e); return true; } /* set the default event backend */ void tevent_set_default_backend(const char *backend) { talloc_free(tevent_default_backend); tevent_default_backend = talloc_strdup(NULL, backend); } /* initialise backends if not already done */ static void tevent_backend_init(void) { static bool done; if (done) { return; } done = true; tevent_poll_init(); tevent_poll_mt_init(); #if defined(HAVE_EPOLL) tevent_epoll_init(); #elif defined(HAVE_SOLARIS_PORTS) tevent_port_init(); #endif tevent_standard_init(); } _PRIVATE_ const struct tevent_ops *tevent_find_ops_byname(const char *name) { struct tevent_ops_list *e; tevent_backend_init(); if (name == NULL) { name = tevent_default_backend; } if (name == NULL) { name = "standard"; } for (e = tevent_backends; e != NULL; e = e->next) { if (0 == strcmp(e->name, name)) { return e->ops; } } return NULL; } /* list available backends */ const char **tevent_backend_list(TALLOC_CTX *mem_ctx) { const char **list = NULL; struct tevent_ops_list *e; size_t idx = 0; tevent_backend_init(); for (e=tevent_backends;e;e=e->next) { idx += 1; } list = talloc_zero_array(mem_ctx, const char *, idx+1); if (list == NULL) { return NULL; } idx = 0; for (e=tevent_backends;e;e=e->next) { list[idx] = talloc_strdup(list, e->name); if (list[idx] == NULL) { TALLOC_FREE(list); return NULL; } idx += 1; } return list; } static void tevent_common_wakeup_fini(struct tevent_context *ev); #ifdef HAVE_PTHREAD static pthread_mutex_t tevent_contexts_mutex = PTHREAD_MUTEX_INITIALIZER; static struct tevent_context *tevent_contexts = NULL; static pthread_once_t tevent_atfork_initialized = PTHREAD_ONCE_INIT; static void tevent_atfork_prepare(void) { struct tevent_context *ev; int ret; ret = pthread_mutex_lock(&tevent_contexts_mutex); if (ret != 0) { abort(); } for (ev = tevent_contexts; ev != NULL; ev = ev->next) { struct tevent_threaded_context *tctx; for (tctx = ev->threaded_contexts; tctx != NULL; tctx = tctx->next) { ret = pthread_mutex_lock(&tctx->event_ctx_mutex); if (ret != 0) { tevent_abort(ev, "pthread_mutex_lock failed"); } } ret = pthread_mutex_lock(&ev->scheduled_mutex); if (ret != 0) { tevent_abort(ev, "pthread_mutex_lock failed"); } } } static void
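/*
 * fork() safety: tevent_atfork_prepare() above takes the global context-list
 * mutex and then every context's threaded-context and scheduled mutexes;
 * tevent_atfork_parent() below releases them again in reverse order, and
 * tevent_atfork_child() additionally detaches all threaded contexts, since
 * the threads that own them do not exist in the child.
 */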
tevent_atfork_parent(void) { struct tevent_context *ev; int ret; for (ev = DLIST_TAIL(tevent_contexts); ev != NULL; ev = DLIST_PREV(ev)) { struct tevent_threaded_context *tctx; ret = pthread_mutex_unlock(&ev->scheduled_mutex); if (ret != 0) { tevent_abort(ev, "pthread_mutex_unlock failed"); } for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL; tctx = DLIST_PREV(tctx)) { ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { tevent_abort( ev, "pthread_mutex_unlock failed"); } } } ret = pthread_mutex_unlock(&tevent_contexts_mutex); if (ret != 0) { abort(); } } static void tevent_atfork_child(void) { struct tevent_context *ev; int ret; for (ev = DLIST_TAIL(tevent_contexts); ev != NULL; ev = DLIST_PREV(ev)) { struct tevent_threaded_context *tctx; for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL; tctx = DLIST_PREV(tctx)) { tctx->event_ctx = NULL; ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { tevent_abort( ev, "pthread_mutex_unlock failed"); } } ev->threaded_contexts = NULL; ret = pthread_mutex_unlock(&ev->scheduled_mutex); if (ret != 0) { tevent_abort(ev, "pthread_mutex_unlock failed"); } } ret = pthread_mutex_unlock(&tevent_contexts_mutex); if (ret != 0) { abort(); } } static void tevent_prep_atfork(void) { int ret; ret = pthread_atfork(tevent_atfork_prepare, tevent_atfork_parent, tevent_atfork_child); if (ret != 0) { abort(); } } #endif int tevent_common_context_destructor(struct tevent_context *ev) { struct tevent_fd *fd, *fn; struct tevent_timer *te, *tn; struct tevent_immediate *ie, *in; struct tevent_signal *se, *sn; struct tevent_wrapper_glue *gl, *gn; #ifdef HAVE_PTHREAD int ret; #endif if (ev->wrapper.glue != NULL) { tevent_abort(ev, "tevent_common_context_destructor() active on wrapper"); } #ifdef HAVE_PTHREAD ret = pthread_mutex_lock(&tevent_contexts_mutex); if (ret != 0) { abort(); } DLIST_REMOVE(tevent_contexts, ev); ret = pthread_mutex_unlock(&tevent_contexts_mutex); if (ret != 0) { abort(); } while (ev->threaded_contexts != NULL) { struct tevent_threaded_context *tctx = ev->threaded_contexts; ret = pthread_mutex_lock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } /* * Indicate to the thread that the tevent_context is * gone. The counterpart of this is in * _tevent_threaded_schedule_immediate, there we read * this under the threaded_context's mutex. 
*/ tctx->event_ctx = NULL; ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } DLIST_REMOVE(ev->threaded_contexts, tctx); } ret = pthread_mutex_destroy(&ev->scheduled_mutex); if (ret != 0) { abort(); } #endif for (gl = ev->wrapper.list; gl; gl = gn) { gn = gl->next; gl->main_ev = NULL; DLIST_REMOVE(ev->wrapper.list, gl); } tevent_common_wakeup_fini(ev); for (fd = ev->fd_events; fd; fd = fn) { fn = fd->next; tevent_trace_fd_callback(fd->event_ctx, fd, TEVENT_EVENT_TRACE_DETACH); fd->wrapper = NULL; fd->event_ctx = NULL; DLIST_REMOVE(ev->fd_events, fd); } ev->last_zero_timer = NULL; for (te = ev->timer_events; te; te = tn) { tn = te->next; tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH); te->wrapper = NULL; te->event_ctx = NULL; DLIST_REMOVE(ev->timer_events, te); } for (ie = ev->immediate_events; ie; ie = in) { in = ie->next; tevent_trace_immediate_callback(ie->event_ctx, ie, TEVENT_EVENT_TRACE_DETACH); ie->wrapper = NULL; ie->event_ctx = NULL; ie->cancel_fn = NULL; DLIST_REMOVE(ev->immediate_events, ie); } for (se = ev->signal_events; se; se = sn) { sn = se->next; tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_DETACH); se->wrapper = NULL; se->event_ctx = NULL; DLIST_REMOVE(ev->signal_events, se); /* * This is important, Otherwise signals * are handled twice in child. eg, SIGHUP. * one added in parent, and another one in * the child. -- BoYang */ tevent_cleanup_pending_signal_handlers(se); } /* removing nesting hook or we get an abort when nesting is * not allowed. -- SSS * Note that we need to leave the allowed flag at its current * value, otherwise the use in tevent_re_initialise() will * leave the event context with allowed forced to false, which * will break users that expect nesting to be allowed */ ev->nesting.level = 0; ev->nesting.hook_fn = NULL; ev->nesting.hook_private = NULL; return 0; } static int tevent_common_context_constructor(struct tevent_context *ev) { int ret; #ifdef HAVE_PTHREAD ret = pthread_once(&tevent_atfork_initialized, tevent_prep_atfork); if (ret != 0) { return ret; } ret = pthread_mutex_init(&ev->scheduled_mutex, NULL); if (ret != 0) { return ret; } ret = pthread_mutex_lock(&tevent_contexts_mutex); if (ret != 0) { pthread_mutex_destroy(&ev->scheduled_mutex); return ret; } DLIST_ADD(tevent_contexts, ev); ret = pthread_mutex_unlock(&tevent_contexts_mutex); if (ret != 0) { abort(); } #endif talloc_set_destructor(ev, tevent_common_context_destructor); return 0; } void tevent_common_check_double_free(TALLOC_CTX *ptr, const char *reason) { void *parent_ptr = talloc_parent(ptr); size_t parent_blocks = talloc_total_blocks(parent_ptr); if (parent_ptr != NULL && parent_blocks == 0) { /* * This is an implicit talloc free, as we still have a parent * but it's already being destroyed. Note that * talloc_total_blocks(ptr) also just returns 0 if a * talloc_free(ptr) is still in progress of freeing all * children. */ return; } tevent_abort(NULL, reason); } /* create a event_context structure for a specific implemementation. This must be the first events call, and all subsequent calls pass this event_context as the first element. Event handlers also receive this as their first argument. This function is for allowing third-party-applications to hook in gluecode to their own event loop code, so that they can make async usage of our client libs NOTE: use tevent_context_init() inside of samba! 
*/ struct tevent_context *tevent_context_init_ops(TALLOC_CTX *mem_ctx, const struct tevent_ops *ops, void *additional_data) { struct tevent_context *ev; int ret; ev = talloc_zero(mem_ctx, struct tevent_context); if (!ev) return NULL; ret = tevent_common_context_constructor(ev); if (ret != 0) { talloc_free(ev); return NULL; } ev->ops = ops; ev->additional_data = additional_data; ret = ev->ops->context_init(ev); if (ret != 0) { talloc_free(ev); return NULL; } return ev; } /* create a event_context structure. This must be the first events call, and all subsequent calls pass this event_context as the first element. Event handlers also receive this as their first argument. */ struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx, const char *name) { const struct tevent_ops *ops; ops = tevent_find_ops_byname(name); if (ops == NULL) { return NULL; } return tevent_context_init_ops(mem_ctx, ops, NULL); } /* create a event_context structure. This must be the first events call, and all subsequent calls pass this event_context as the first element. Event handlers also receive this as their first argument. */ struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx) { return tevent_context_init_byname(mem_ctx, NULL); } /* add a fd based event return NULL on failure (memory allocation error) */ struct tevent_fd *_tevent_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { return ev->ops->add_fd(ev, mem_ctx, fd, flags, handler, private_data, handler_name, location); } /* set a close function on the fd event */ void tevent_fd_set_close_fn(struct tevent_fd *fde, tevent_fd_close_fn_t close_fn) { if (!fde) return; if (!fde->event_ctx) return; fde->event_ctx->ops->set_fd_close_fn(fde, close_fn); } static void tevent_fd_auto_close_fn(struct tevent_context *ev, struct tevent_fd *fde, int fd, void *private_data) { close(fd); } void tevent_fd_set_auto_close(struct tevent_fd *fde) { tevent_fd_set_close_fn(fde, tevent_fd_auto_close_fn); } /* return the fd event flags */ uint16_t tevent_fd_get_flags(struct tevent_fd *fde) { if (!fde) return 0; if (!fde->event_ctx) return 0; return fde->event_ctx->ops->get_fd_flags(fde); } /* set the fd event flags */ void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags) { if (!fde) return; if (!fde->event_ctx) return; fde->event_ctx->ops->set_fd_flags(fde, flags); } bool tevent_signal_support(struct tevent_context *ev) { if (ev->ops->add_signal) { return true; } return false; } static void (*tevent_abort_fn)(const char *reason); void tevent_set_abort_fn(void (*abort_fn)(const char *reason)) { tevent_abort_fn = abort_fn; } void tevent_abort(struct tevent_context *ev, const char *reason) { if (ev != NULL) { tevent_debug(ev, TEVENT_DEBUG_FATAL, "abort: %s\n", reason); } if (!tevent_abort_fn) { abort(); } tevent_abort_fn(reason); } /* add a timer event return NULL on failure */ struct tevent_timer *_tevent_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location) { return ev->ops->add_timer(ev, mem_ctx, next_event, handler, private_data, handler_name, location); } /* allocate an immediate event return NULL on failure (memory allocation error) */ struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx, const char *location) { struct tevent_immediate *im; im = talloc(mem_ctx, struct 
tevent_immediate); if (im == NULL) return NULL; *im = (struct tevent_immediate) { .create_location = location }; return im; } /* schedule an immediate event */ void _tevent_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location) { ev->ops->schedule_immediate(im, ev, handler, private_data, handler_name, location); } /* add a signal event sa_flags are flags to sigaction(2) return NULL on failure */ struct tevent_signal *_tevent_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location) { return ev->ops->add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data, handler_name, location); } void tevent_loop_allow_nesting(struct tevent_context *ev) { if (ev->wrapper.glue != NULL) { tevent_abort(ev, "tevent_loop_allow_nesting() on wrapper"); return; } if (ev->wrapper.list != NULL) { tevent_abort(ev, "tevent_loop_allow_nesting() with wrapper"); return; } ev->nesting.allowed = true; } void tevent_loop_set_nesting_hook(struct tevent_context *ev, tevent_nesting_hook hook, void *private_data) { if (ev->nesting.hook_fn && (ev->nesting.hook_fn != hook || ev->nesting.hook_private != private_data)) { /* the way the nesting hook code is currently written we cannot support two different nesting hooks at the same time. */ tevent_abort(ev, "tevent: Violation of nesting hook rules\n"); } ev->nesting.hook_fn = hook; ev->nesting.hook_private = private_data; } static void tevent_abort_nesting(struct tevent_context *ev, const char *location) { const char *reason; reason = talloc_asprintf(NULL, "tevent_loop_once() nesting at %s", location); if (!reason) { reason = "tevent_loop_once() nesting"; } tevent_abort(ev, reason); } /* do a single event loop using the events defined in ev */ int _tevent_loop_once(struct tevent_context *ev, const char *location) { int ret; void *nesting_stack_ptr = NULL; ev->nesting.level++; if (ev->nesting.level > 1) { if (!ev->nesting.allowed) { tevent_abort_nesting(ev, location); errno = ELOOP; return -1; } } if (ev->nesting.level > 0) { if (ev->nesting.hook_fn) { int ret2; ret2 = ev->nesting.hook_fn(ev, ev->nesting.hook_private, ev->nesting.level, true, (void *)&nesting_stack_ptr, location); if (ret2 != 0) { ret = ret2; goto done; } } } tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE); ret = ev->ops->loop_once(ev, location); tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE); if (ev->nesting.level > 0) { if (ev->nesting.hook_fn) { int ret2; ret2 = ev->nesting.hook_fn(ev, ev->nesting.hook_private, ev->nesting.level, false, (void *)&nesting_stack_ptr, location); if (ret2 != 0) { ret = ret2; goto done; } } } done: ev->nesting.level--; return ret; } /* this is a performance optimization for the samba4 nested event loop problems */ int _tevent_loop_until(struct tevent_context *ev, bool (*finished)(void *private_data), void *private_data, const char *location) { int ret = 0; void *nesting_stack_ptr = NULL; ev->nesting.level++; if (ev->nesting.level > 1) { if (!ev->nesting.allowed) { tevent_abort_nesting(ev, location); errno = ELOOP; return -1; } } if (ev->nesting.level > 0) { if (ev->nesting.hook_fn) { int ret2; ret2 = ev->nesting.hook_fn(ev, ev->nesting.hook_private, ev->nesting.level, true, (void *)&nesting_stack_ptr, location); if (ret2 != 0) { ret = ret2; goto done; } } } while (!finished(private_data)) { 
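/*
 * Run one backend iteration per loop turn and stop as soon as the caller's
 * finished() predicate reports completion or the backend returns an error.
 */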
tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE); ret = ev->ops->loop_once(ev, location); tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE); if (ret != 0) { break; } } if (ev->nesting.level > 0) { if (ev->nesting.hook_fn) { int ret2; ret2 = ev->nesting.hook_fn(ev, ev->nesting.hook_private, ev->nesting.level, false, (void *)&nesting_stack_ptr, location); if (ret2 != 0) { ret = ret2; goto done; } } } done: ev->nesting.level--; return ret; } bool tevent_common_have_events(struct tevent_context *ev) { if (ev->fd_events != NULL) { if (ev->fd_events != ev->wakeup_fde) { return true; } if (ev->fd_events->next != NULL) { return true; } /* * At this point we just have the wakeup pipe event as * the only fd_event. That one does not count as a * regular event, so look at the other event types. */ } return ((ev->timer_events != NULL) || (ev->immediate_events != NULL) || (ev->signal_events != NULL)); } /* return on failure or (with 0) if all fd events are removed */ int tevent_common_loop_wait(struct tevent_context *ev, const char *location) { /* * loop as long as we have events pending */ while (tevent_common_have_events(ev)) { int ret; ret = _tevent_loop_once(ev, location); if (ret != 0) { tevent_debug(ev, TEVENT_DEBUG_FATAL, "_tevent_loop_once() failed: %d - %s\n", ret, strerror(errno)); return ret; } } tevent_debug(ev, TEVENT_DEBUG_WARNING, "tevent_common_loop_wait() out of events\n"); return 0; } /* return on failure or (with 0) if all fd events are removed */ int _tevent_loop_wait(struct tevent_context *ev, const char *location) { return ev->ops->loop_wait(ev, location); } /* re-initialise a tevent context. This leaves you with the same event context, but all events are wiped and the structure is re-initialised. This is most useful after a fork() zero is returned on success, non-zero on failure */ int tevent_re_initialise(struct tevent_context *ev) { tevent_common_context_destructor(ev); tevent_common_context_constructor(ev); return ev->ops->context_init(ev); } static void wakeup_pipe_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *_private) { ssize_t ret; do { /* * This is the boilerplate for eventfd, but it works * for pipes too. And as we don't care about the data * we read, we're fine. 
*/ uint64_t val; ret = read(fde->fd, &val, sizeof(val)); } while (ret == -1 && errno == EINTR); } /* * Initialize the wakeup pipe and pipe fde */ int tevent_common_wakeup_init(struct tevent_context *ev) { int ret, read_fd; if (ev->wakeup_fde != NULL) { return 0; } #ifdef HAVE_EVENTFD ret = eventfd(0, EFD_NONBLOCK); if (ret == -1) { return errno; } read_fd = ev->wakeup_fd = ret; #else { int pipe_fds[2]; ret = pipe(pipe_fds); if (ret == -1) { return errno; } ev->wakeup_fd = pipe_fds[1]; ev->wakeup_read_fd = pipe_fds[0]; ev_set_blocking(ev->wakeup_fd, false); ev_set_blocking(ev->wakeup_read_fd, false); read_fd = ev->wakeup_read_fd; } #endif ev->wakeup_fde = tevent_add_fd(ev, ev, read_fd, TEVENT_FD_READ, wakeup_pipe_handler, NULL); if (ev->wakeup_fde == NULL) { close(ev->wakeup_fd); #ifndef HAVE_EVENTFD close(ev->wakeup_read_fd); #endif return ENOMEM; } return 0; } int tevent_common_wakeup_fd(int fd) { ssize_t ret; do { #ifdef HAVE_EVENTFD uint64_t val = 1; ret = write(fd, &val, sizeof(val)); #else char c = '\0'; ret = write(fd, &c, 1); #endif } while ((ret == -1) && (errno == EINTR)); return 0; } int tevent_common_wakeup(struct tevent_context *ev) { if (ev->wakeup_fde == NULL) { return ENOTCONN; } return tevent_common_wakeup_fd(ev->wakeup_fd); } static void tevent_common_wakeup_fini(struct tevent_context *ev) { if (ev->wakeup_fde == NULL) { return; } TALLOC_FREE(ev->wakeup_fde); close(ev->wakeup_fd); #ifndef HAVE_EVENTFD close(ev->wakeup_read_fd); #endif } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent.h0000660000000000000000000024702600000000000014524 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. generalised event loop handling Copyright (C) Andrew Tridgell 2005 Copyright (C) Stefan Metzmacher 2005-2009 Copyright (C) Volker Lendecke 2008 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef __TEVENT_H__ #define __TEVENT_H__ #include <stdint.h> #include <talloc.h> #include <sys/time.h> #include <stdbool.h> /* for old gcc releases that don't have the feature test macro __has_attribute */ #ifndef __has_attribute #define __has_attribute(x) 0 #endif struct tevent_context; struct tevent_ops; struct tevent_fd; struct tevent_timer; struct tevent_immediate; struct tevent_signal; struct tevent_thread_proxy; struct tevent_threaded_context; /** * @defgroup tevent The tevent API * * The tevent low-level API * * This API provides the public interface to manage events in the tevent * mainloop. Functions are provided for managing low-level events such * as timer events, fd events and signal handling. * * @{ */ /* event handler types */ /** * Called when a file descriptor monitored by tevent has * data to be read or written on it.
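 *
 * A minimal handler might look like the following sketch (the names used
 * here are illustrative and not part of this header):
 *
 * @code
 * static void echo_handler(struct tevent_context *ev,
 *                          struct tevent_fd *fde,
 *                          uint16_t flags,
 *                          void *private_data)
 * {
 *         int fd = *(int *)private_data;
 *         char buf[128];
 *
 *         if (flags & TEVENT_FD_READ) {
 *                 (void)read(fd, buf, sizeof(buf));
 *         }
 * }
 * @endcode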
*/ typedef void (*tevent_fd_handler_t)(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_data); /** * Called when tevent is ceasing the monitoring of a file descriptor. */ typedef void (*tevent_fd_close_fn_t)(struct tevent_context *ev, struct tevent_fd *fde, int fd, void *private_data); /** * Called when a tevent timer has fired. */ typedef void (*tevent_timer_handler_t)(struct tevent_context *ev, struct tevent_timer *te, struct timeval current_time, void *private_data); /** * Called when a tevent immediate event is invoked. */ typedef void (*tevent_immediate_handler_t)(struct tevent_context *ctx, struct tevent_immediate *im, void *private_data); /** * Called after tevent detects the specified signal. */ typedef void (*tevent_signal_handler_t)(struct tevent_context *ev, struct tevent_signal *se, int signum, int count, void *siginfo, void *private_data); /** * @brief Create a event_context structure. * * This must be the first events call, and all subsequent calls pass this * event_context as the first element. Event handlers also receive this as * their first argument. * * @param[in] mem_ctx The memory context to use. * * @return An allocated tevent context, NULL on error. * * @see tevent_context_init() */ struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx); /** * @brief Create a event_context structure and select a specific backend. * * This must be the first events call, and all subsequent calls pass this * event_context as the first element. Event handlers also receive this as * their first argument. * * @param[in] mem_ctx The memory context to use. * * @param[in] name The name of the backend to use. * * @return An allocated tevent context, NULL on error. */ struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx, const char *name); /** * @brief Create a custom event context * * @param[in] mem_ctx The memory context to use. * @param[in] ops The function pointer table of the backend. * @param[in] additional_data The additional/private data to this instance * * @return An allocated tevent context, NULL on error. * */ struct tevent_context *tevent_context_init_ops(TALLOC_CTX *mem_ctx, const struct tevent_ops *ops, void *additional_data); /** * @brief List available backends. * * @param[in] mem_ctx The memory context to use. * * @return A string vector with a terminating NULL element, NULL * on error. */ const char **tevent_backend_list(TALLOC_CTX *mem_ctx); /** * @brief Set the default tevent backend. * * @param[in] backend The name of the backend to set. */ void tevent_set_default_backend(const char *backend); #ifdef DOXYGEN /** * @brief Add a file descriptor based event. * * @param[in] ev The event context to work on. * * @param[in] mem_ctx The talloc memory context to use. * * @param[in] fd The file descriptor to base the event on. * * @param[in] flags #TEVENT_FD_READ or #TEVENT_FD_WRITE * * @param[in] handler The callback handler for the event. * * @param[in] private_data The private data passed to the callback handler. * * @return The file descriptor based event, NULL on error. * * @note To cancel the monitoring of a file descriptor, call talloc_free() * on the object returned by this function. * * @note The caller should avoid closing the file descriptor before * calling talloc_free()! Otherwise the behaviour is undefined which * might result in crashes. See https://bugzilla.samba.org/show_bug.cgi?id=11141 * for an example. 
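 *
 * Minimal usage sketch (assumes an existing tevent context @c ev, a
 * connected socket @c sockfd and a handler @c my_read_handler; these names
 * are illustrative only):
 *
 * @code
 * struct tevent_fd *fde;
 *
 * fde = tevent_add_fd(ev, ev, sockfd, TEVENT_FD_READ,
 *                     my_read_handler, NULL);
 * if (fde == NULL) {
 *         return ENOMEM;
 * }
 * @endcode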
*/ struct tevent_fd *tevent_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data); #else struct tevent_fd *_tevent_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location); #define tevent_add_fd(ev, mem_ctx, fd, flags, handler, private_data) \ _tevent_add_fd(ev, mem_ctx, fd, flags, handler, private_data, \ #handler, __location__) #endif /** * @brief Associate a custom tag with the event. * * This tag can be then retrieved with tevent_fd_get_tag() * * @param[in] fde The file descriptor event. * * @param[in] tag Custom tag. */ void tevent_fd_set_tag(struct tevent_fd *fde, uint64_t tag); /** * @brief Get custom event tag. */ uint64_t tevent_fd_get_tag(const struct tevent_fd *fde); #ifdef DOXYGEN /** * @brief Add a timed event * * @param[in] ev The event context to work on. * * @param[in] mem_ctx The talloc memory context to use. * * @param[in] next_event Timeval specifying the absolute time to fire this * event. This is not an offset. * * @param[in] handler The callback handler for the event. * * @param[in] private_data The private data passed to the callback handler. * * @return The newly-created timer event, or NULL on error. * * @note To cancel a timer event before it fires, call talloc_free() on the * event returned from this function. This event is automatically * talloc_free()-ed after its event handler files, if it hasn't been freed yet. * * @note Unlike some mainloops, tevent timers are one-time events. To set up * a recurring event, it is necessary to call tevent_add_timer() again during * the handler processing. * * @note Due to the internal mainloop processing, a timer set to run * immediately will do so after any other pending timers fire, but before * any further file descriptor or signal handling events fire. Callers should * not rely on this behavior! */ struct tevent_timer *tevent_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data); #else struct tevent_timer *_tevent_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location); #define tevent_add_timer(ev, mem_ctx, next_event, handler, private_data) \ _tevent_add_timer(ev, mem_ctx, next_event, handler, private_data, \ #handler, __location__) #endif /** * @brief Set the time a tevent_timer fires * * @param[in] te The timer event to reset * * @param[in] next_event Timeval specifying the absolute time to fire this * event. This is not an offset. */ void tevent_update_timer(struct tevent_timer *te, struct timeval next_event); /** * @brief Associate a custom tag with the event. * * This tag can be then retrieved with tevent_timer_get_tag() * * @param[in] te The timer event. * * @param[in] tag Custom tag. */ void tevent_timer_set_tag(struct tevent_timer *te, uint64_t tag); /** * @brief Get custom event tag. */ uint64_t tevent_timer_get_tag(const struct tevent_timer *te); #ifdef DOXYGEN /** * Initialize an immediate event object * * This object can be used to trigger an event to occur immediately after * returning from the current event (before any other event occurs) * * @param[in] mem_ctx The talloc memory context to use as the parent * * @return An empty tevent_immediate object. Use tevent_schedule_immediate * to populate and use it. 
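 *
 * A typical pattern is to allocate the object once and then schedule it
 * whenever needed (illustrative sketch; @c my_im_handler is not part of
 * the API):
 *
 * @code
 * struct tevent_immediate *im = tevent_create_immediate(mem_ctx);
 * if (im == NULL) {
 *         return ENOMEM;
 * }
 * tevent_schedule_immediate(im, ev, my_im_handler, private_data);
 * @endcode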
* * @note Available as of tevent 0.9.8 */ struct tevent_immediate *tevent_create_immediate(TALLOC_CTX *mem_ctx); #else struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx, const char *location); #define tevent_create_immediate(mem_ctx) \ _tevent_create_immediate(mem_ctx, __location__) #endif #ifdef DOXYGEN /** * Schedule an event for immediate execution. This event will occur * immediately after returning from the current event (before any other * event occurs) * * @param[in] im The tevent_immediate object to populate and use * @param[in] ctx The tevent_context to run this event * @param[in] handler The event handler to run when this event fires * @param[in] private_data Data to pass to the event handler */ void tevent_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ctx, tevent_immediate_handler_t handler, void *private_data); #else void _tevent_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ctx, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location); #define tevent_schedule_immediate(im, ctx, handler, private_data) \ _tevent_schedule_immediate(im, ctx, handler, private_data, \ #handler, __location__); #endif /** * @brief Associate a custom tag with the event. * * This tag can be then retrieved with tevent_immediate_get_tag() * * @param[in] im The immediate event. * * @param[in] tag Custom tag. */ void tevent_immediate_set_tag(struct tevent_immediate *im, uint64_t tag); /** * @brief Get custom event tag. */ uint64_t tevent_immediate_get_tag(const struct tevent_immediate *fde); #ifdef DOXYGEN /** * @brief Add a tevent signal handler * * tevent_add_signal() creates a new event for handling a signal the next * time through the mainloop. It implements a very simple traditional signal * handler whose only purpose is to add the handler event into the mainloop. * * @param[in] ev The event context to work on. * * @param[in] mem_ctx The talloc memory context to use. * * @param[in] signum The signal to trap * * @param[in] handler The callback handler for the signal. * * @param[in] sa_flags sigaction flags for this signal handler. * * @param[in] private_data The private data passed to the callback handler. * * @return The newly-created signal handler event, or NULL on error. * * @note To cancel a signal handler, call talloc_free() on the event returned * from this function. * * @see tevent_num_signals, tevent_sa_info_queue_count */ struct tevent_signal *tevent_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data); #else struct tevent_signal *_tevent_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location); #define tevent_add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data) \ _tevent_add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data, \ #handler, __location__) #endif /** * @brief Associate a custom tag with the event. * * This tag can be then retrieved with tevent_signal_get_tag() * * @param[in] fde The signal event. * * @param[in] tag Custom tag. */ void tevent_signal_set_tag(struct tevent_signal *se, uint64_t tag); /** * @brief Get custom event tag. */ uint64_t tevent_signal_get_tag(const struct tevent_signal *se); /** * @brief the number of supported signals * * This returns value of the configure time TEVENT_NUM_SIGNALS constant. 
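 *
 * For example, a caller may range-check a signal number before registering
 * a handler for it (illustrative sketch; @c ev, @c signum and @c handler
 * are assumed to exist in the caller):
 *
 * @code
 * struct tevent_signal *se;
 *
 * if (signum < 0 || (size_t)signum >= tevent_num_signals()) {
 *         return EINVAL;
 * }
 * se = tevent_add_signal(ev, ev, signum, 0, handler, NULL);
 * @endcode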
* * The 'signum' argument of tevent_add_signal() must be less than * TEVENT_NUM_SIGNALS. * * @see tevent_add_signal */ size_t tevent_num_signals(void); /** * @brief the number of pending realtime signals * * This returns value of TEVENT_SA_INFO_QUEUE_COUNT. * * The tevent internals remember the last TEVENT_SA_INFO_QUEUE_COUNT * siginfo_t structures for SA_SIGINFO signals. If the system generates * more some signals get lost. * * @see tevent_add_signal */ size_t tevent_sa_info_queue_count(void); #ifdef DOXYGEN /** * @brief Pass a single time through the mainloop * * This will process any appropriate signal, immediate, fd and timer events * * @param[in] ev The event context to process * * @return Zero on success, nonzero if an internal error occurred */ int tevent_loop_once(struct tevent_context *ev); #else int _tevent_loop_once(struct tevent_context *ev, const char *location); #define tevent_loop_once(ev) \ _tevent_loop_once(ev, __location__) #endif #ifdef DOXYGEN /** * @brief Run the mainloop * * The mainloop will run until there are no events remaining to be processed * * @param[in] ev The event context to process * * @return Zero if all events have been processed. Nonzero if an internal * error occurred. */ int tevent_loop_wait(struct tevent_context *ev); #else int _tevent_loop_wait(struct tevent_context *ev, const char *location); #define tevent_loop_wait(ev) \ _tevent_loop_wait(ev, __location__) #endif /** * Assign a function to run when a tevent_fd is freed * * This function is a destructor for the tevent_fd. It does not automatically * close the file descriptor. If this is the desired behavior, then it must be * performed by the close_fn. * * @param[in] fde File descriptor event on which to set the destructor * @param[in] close_fn Destructor to execute when fde is freed * * @note That the close_fn() on tevent_fd is *NOT* wrapped on contexts * created by tevent_context_wrapper_create()! * * @see tevent_fd_set_close_fn * @see tevent_context_wrapper_create */ void tevent_fd_set_close_fn(struct tevent_fd *fde, tevent_fd_close_fn_t close_fn); /** * Automatically close the file descriptor when the tevent_fd is freed * * This function calls close(fd) internally. * * @param[in] fde File descriptor event to auto-close * * @see tevent_fd_set_close_fn */ void tevent_fd_set_auto_close(struct tevent_fd *fde); /** * Return the flags set on this file descriptor event * * @param[in] fde File descriptor event to query * * @return The flags set on the event. See #TEVENT_FD_READ and * #TEVENT_FD_WRITE */ uint16_t tevent_fd_get_flags(struct tevent_fd *fde); /** * Set flags on a file descriptor event * * @param[in] fde File descriptor event to set * @param[in] flags Flags to set on the event. 
See #TEVENT_FD_READ and * #TEVENT_FD_WRITE */ void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags); /** * Query whether tevent supports signal handling * * @param[in] ev An initialized tevent context * * @return True if this platform and tevent context support signal handling */ bool tevent_signal_support(struct tevent_context *ev); void tevent_set_abort_fn(void (*abort_fn)(const char *reason)); /* bits for file descriptor event flags */ /** * Monitor a file descriptor for data to be read */ #define TEVENT_FD_READ 1 /** * Monitor a file descriptor for writeability */ #define TEVENT_FD_WRITE 2 /** * Convenience macro for marking a tevent_fd writable */ #define TEVENT_FD_WRITEABLE(fde) \ tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) | TEVENT_FD_WRITE) /** * Convenience macro for marking a tevent_fd readable */ #define TEVENT_FD_READABLE(fde) \ tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) | TEVENT_FD_READ) /** * Convenience macro for marking a tevent_fd non-writable */ #define TEVENT_FD_NOT_WRITEABLE(fde) \ tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) & ~TEVENT_FD_WRITE) /** * Convenience macro for marking a tevent_fd non-readable */ #define TEVENT_FD_NOT_READABLE(fde) \ tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) & ~TEVENT_FD_READ) /** * Debug level of tevent */ enum tevent_debug_level { TEVENT_DEBUG_FATAL, TEVENT_DEBUG_ERROR, TEVENT_DEBUG_WARNING, TEVENT_DEBUG_TRACE }; /** * @brief The tevent debug callback. * * @param[in] context The private data pointer registered with tevent_set_debug(). * * @param[in] level The debug level. * * @param[in] fmt The format string. * * @param[in] ap The arguments for the format string. */ typedef void (*tevent_debug_fn)(void *context, enum tevent_debug_level level, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0); /** * Set the destination for tevent debug messages * * @param[in] ev Event context to debug * @param[in] debug Function to handle output printing * @param[in] context The context to pass to the debug function. * * @return Always returns 0 as of version 0.9.8 * * @note The default is to emit no debug messages */ int tevent_set_debug(struct tevent_context *ev, tevent_debug_fn debug, void *context); /** * Designate stderr for debug message output * * @param[in] ev Event context to debug * * @note This function will only output TEVENT_DEBUG_FATAL, TEVENT_DEBUG_ERROR * and TEVENT_DEBUG_WARNING messages. For TEVENT_DEBUG_TRACE, please define a * function for tevent_set_debug() */ int tevent_set_debug_stderr(struct tevent_context *ev); enum tevent_trace_point { /** * Corresponds to a trace point just before waiting */ TEVENT_TRACE_BEFORE_WAIT, /** * Corresponds to a trace point just after waiting */ TEVENT_TRACE_AFTER_WAIT, #define TEVENT_HAS_LOOP_ONCE_TRACE_POINTS 1 /** * Corresponds to a trace point just before calling * the loop_once() backend function. */ TEVENT_TRACE_BEFORE_LOOP_ONCE, /** * Corresponds to a trace point right after the * loop_once() backend function has returned. */ TEVENT_TRACE_AFTER_LOOP_ONCE, }; typedef void (*tevent_trace_callback_t)(enum tevent_trace_point, void *private_data); /** * Register a callback to be called at certain trace points * * @param[in] ev Event context * @param[in] cb Trace callback * @param[in] private_data Data to be passed to callback * * @note The callback will be called at trace points defined by * tevent_trace_point. Call with NULL to reset.
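 *
 * As a rough usage sketch (my_trace and ev are illustrative names, not part
 * of this header), a callback could flush buffered log output just before
 * the loop blocks:
 * @code
 * static void my_trace(enum tevent_trace_point point, void *private_data)
 * {
 *      if (point == TEVENT_TRACE_BEFORE_WAIT) {
 *              fflush(stderr);
 *      }
 * }
 *
 * tevent_set_trace_callback(ev, my_trace, NULL);
 * @endcode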
*/ void tevent_set_trace_callback(struct tevent_context *ev, tevent_trace_callback_t cb, void *private_data); /** * Retrieve the current trace callback * * @param[in] ev Event context * @param[out] cb Registered trace callback * @param[out] private_data Registered data to be passed to callback * * @note This can be used to allow one component that wants to * register a callback to respect the callback that another component * has already registered. */ void tevent_get_trace_callback(struct tevent_context *ev, tevent_trace_callback_t *cb, void *private_data); enum tevent_event_trace_point { /** * Corresponds to a trace point just before the event is added. */ TEVENT_EVENT_TRACE_ATTACH, /** * Corresponds to a trace point just before the event is removed. */ TEVENT_EVENT_TRACE_DETACH, /** * Corresponds to a trace point just before the event handler is called. */ TEVENT_EVENT_TRACE_BEFORE_HANDLER, }; typedef void (*tevent_trace_fd_callback_t)(struct tevent_fd *fde, enum tevent_event_trace_point, void *private_data); typedef void (*tevent_trace_signal_callback_t)(struct tevent_signal *se, enum tevent_event_trace_point, void *private_data); typedef void (*tevent_trace_timer_callback_t)(struct tevent_timer *te, enum tevent_event_trace_point, void *private_data); typedef void (*tevent_trace_immediate_callback_t)(struct tevent_immediate *im, enum tevent_event_trace_point, void *private_data); /** * Register a callback to be called at certain trace points of fd event. * * @param[in] ev Event context * @param[in] cb Trace callback * @param[in] private_data Data to be passed to callback * * @note The callback will be called at trace points defined by * tevent_event_trace_point. Call with NULL to reset. */ void tevent_set_trace_fd_callback(struct tevent_context *ev, tevent_trace_fd_callback_t cb, void *private_data); /** * Retrieve the current trace callback of file descriptor event. * * @param[in] ev Event context * @param[out] cb Registered trace callback * @param[out] p_private_data Registered data to be passed to callback * * @note This can be used to allow one component that wants to * register a callback to respect the callback that another component * has already registered. */ void tevent_get_trace_fd_callback(struct tevent_context *ev, tevent_trace_fd_callback_t *cb, void *p_private_data); /** * Register a callback to be called at certain trace points of signal event. * * @param[in] ev Event context * @param[in] cb Trace callback * @param[in] private_data Data to be passed to callback * * @note The callback will be called at trace points defined by * tevent_event_trace_point. Call with NULL to reset. */ void tevent_set_trace_signal_callback(struct tevent_context *ev, tevent_trace_signal_callback_t cb, void *private_data); /** * Retrieve the current trace callback of signal event. * * @param[in] ev Event context * @param[out] cb Registered trace callback * @param[out] p_private_data Registered data to be passed to callback * * @note This can be used to allow one component that wants to * register a callback to respect the callback that another component * has already registered. */ void tevent_get_trace_signal_callback(struct tevent_context *ev, tevent_trace_signal_callback_t *cb, void *p_private_data); /** * Register a callback to be called at certain trace points of timer event. * * @param[in] ev Event context * @param[in] cb Trace callback * @param[in] private_data Data to be passed to callback * * @note The callback will be called at trace points defined by * tevent_event_trace_point. 
Call with NULL to reset. */ void tevent_set_trace_timer_callback(struct tevent_context *ev, tevent_trace_timer_callback_t cb, void *private_data); /** * Retrieve the current trace callback of timer event. * * @param[in] ev Event context * @param[out] cb Registered trace callback * @param[out] p_private_data Registered data to be passed to callback * * @note This can be used to allow one component that wants to * register a callback to respect the callback that another component * has already registered. */ void tevent_get_trace_timer_callback(struct tevent_context *ev, tevent_trace_timer_callback_t *cb, void *p_private_data); /** * Register a callback to be called at certain trace points of immediate event. * * @param[in] ev Event context * @param[in] cb Trace callback * @param[in] private_data Data to be passed to callback * * @note The callback will be called at trace points defined by * tevent_event_trace_point. Call with NULL to reset. */ void tevent_set_trace_immediate_callback(struct tevent_context *ev, tevent_trace_immediate_callback_t cb, void *private_data); /** * Retrieve the current trace callback of immediate event. * * @param[in] ev Event context * @param[out] cb Registered trace callback * @param[out] p_private_data Registered data to be passed to callback * * @note This can be used to allow one component that wants to * register a callback to respect the callback that another component * has already registered. */ void tevent_get_trace_immediate_callback(struct tevent_context *ev, tevent_trace_immediate_callback_t *cb, void *p_private_data); /** * @} */ /** * @defgroup tevent_request The tevent request functions. * @ingroup tevent * * A tevent_req represents an asynchronous computation. * * The tevent_req group of API calls is the recommended way of * programming async computations within tevent. In particular the * file descriptor (tevent_add_fd) and timer (tevent_add_timed) events * are considered too low-level to be used in larger computations. To * read and write from and to sockets, Samba provides two calls on top * of tevent_add_fd: tstream_read_packet_send/recv and tstream_writev_send/recv. * These requests are much easier to compose than the low-level event * handlers called from tevent_add_fd. * * A lot of the simplicity tevent_req has brought to the notoriously * hairy async programming came via a set of conventions that every * async computation programmed should follow. One central piece of * these conventions is the naming of routines and variables. * * Every async computation needs a name (sensibly called "computation" * down from here). From this name quite a few naming conventions are * derived. * * Every computation that requires local state needs a * @code * struct computation_state { * int local_var; * }; * @endcode * Even if no local variables are required, such a state struct should * be created containing a dummy variable. Quite a few helper * functions and macros (for example tevent_req_create()) assume such * a state struct. * * An async computation is started by a computation_send * function. When it is finished, its result can be received by a * computation_recv function. For an example how to set up an async * computation, see the code example in the documentation for * tevent_req_create() and tevent_req_post(). The prototypes for _send * and _recv functions should follow some conventions: * * @code * struct tevent_req *computation_send(TALLOC_CTX *mem_ctx, * struct tevent_context *ev, * ... 
further args); * int computation_recv(struct tevent_req *req, ... further output args); * @endcode * * The "int" result of computation_recv() depends on the result the * sync version of the function would have, "int" is just an example * here. * * Another important piece of the conventions is that the program flow * is interrupted as little as possible. Because a blocking * sub-computation requires that the flow needs to continue in a * separate function that is the logical sequel of some computation, * it should lexically follow sending off the blocking * sub-computation. Setting the callback function via * tevent_req_set_callback() requires referencing a function lexically * below the call to tevent_req_set_callback(), forward declarations * are required. A lot of the async computations thus begin with a * sequence of declarations such as * * @code * static void computation_step1_done(struct tevent_req *subreq); * static void computation_step2_done(struct tevent_req *subreq); * static void computation_step3_done(struct tevent_req *subreq); * @endcode * * It really helps readability a lot to do these forward declarations, * because the lexically sequential program flow makes the async * computations almost as clear to read as a normal, sync program * flow. * * It is up to the user of the async computation to talloc_free it * after it has finished. If an async computation should be aborted, * the tevent_req structure can be talloc_free'ed. After it has * finished, it should talloc_free'ed by the API user. * * tevent_req variable naming conventions: * * The name of the variable pointing to the tevent_req structure * returned by a _send() function SHOULD be named differently between * implementation and caller. * * From the point of view of the implementation (of the _send() and * _recv() functions) the variable returned by tevent_req_create() is * always called @em req. * * While the caller of the _send() function should use @em subreq to * hold the result. * * @see tevent_req_create() * @see tevent_req_fn() * * @{ */ /** * An async request moves from TEVENT_REQ_INIT to * TEVENT_REQ_IN_PROGRESS. All other states are valid after a request * has finished. */ enum tevent_req_state { /** * We are creating the request */ TEVENT_REQ_INIT, /** * We are waiting the request to complete */ TEVENT_REQ_IN_PROGRESS, /** * The request is finished successfully */ TEVENT_REQ_DONE, /** * A user error has occurred. The user error has been * indicated by tevent_req_error(), it can be retrieved via * tevent_req_is_error(). */ TEVENT_REQ_USER_ERROR, /** * Request timed out after the timeout set by tevent_req_set_endtime. */ TEVENT_REQ_TIMED_OUT, /** * An internal allocation has failed, or tevent_req_nomem has * been given a NULL pointer as the first argument. */ TEVENT_REQ_NO_MEMORY, /** * The request has been received by the caller. No further * action is valid. */ TEVENT_REQ_RECEIVED }; /** * @brief An async request */ struct tevent_req; /** * @brief A tevent request callback function. * * @param[in] subreq The tevent async request which executed this callback. */ typedef void (*tevent_req_fn)(struct tevent_req *subreq); /** * @brief Set an async request callback. * * See the documentation of tevent_req_post() for an example how this * is supposed to be used. * * @param[in] req The async request to set the callback. * * @param[in] fn The callback function to set. * * @param[in] pvt A pointer to private data to pass to the async request * callback. 
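 *
 * A typical usage sketch inside a _send() function, following the naming
 * conventions above (subcomputation_send() and computation_done are
 * illustrative, not part of this header):
 * @code
 * subreq = subcomputation_send(state, ev);
 * if (tevent_req_nomem(subreq, req)) {
 *      return tevent_req_post(req, ev);
 * }
 * tevent_req_set_callback(subreq, computation_done, req);
 * return req;
 * @endcode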
 */ void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt); #ifdef DOXYGEN /** * @brief Get the private data cast to the given type for a callback from * a tevent request structure. * * @code * static void computation_done(struct tevent_req *subreq) { * struct tevent_req *req = tevent_req_callback_data(subreq, struct tevent_req); * struct computation_state *state = tevent_req_data(req, struct computation_state); * .... more things, eventually maybe call tevent_req_done(req); * } * @endcode * * @param[in] req The structure to get the callback data from. * * @param[in] type The type of the private callback data to get. * * @return The type-casted private data, or NULL if not set. */ void *tevent_req_callback_data(struct tevent_req *req, #type); #else void *_tevent_req_callback_data(struct tevent_req *req); #define tevent_req_callback_data(_req, _type) \ talloc_get_type_abort(_tevent_req_callback_data(_req), _type) #endif #ifdef DOXYGEN /** * @brief Get the private data for a callback from a tevent request structure. * * @param[in] req The structure to get the callback data from. * * @return The private data or NULL if not set. */ void *tevent_req_callback_data_void(struct tevent_req *req); #else #define tevent_req_callback_data_void(_req) \ _tevent_req_callback_data(_req) #endif #ifdef DOXYGEN /** * @brief Get the private data from a tevent request structure. * * When the tevent_req has been created by tevent_req_create, the * result of tevent_req_data() is the state variable created by * tevent_req_create() as a child of the req. * * @param[in] req The structure to get the private data from. * * @param[in] type The type of the private data * * @return The private data or NULL if not set. */ void *tevent_req_data(struct tevent_req *req, #type); #else void *_tevent_req_data(struct tevent_req *req); #define tevent_req_data(_req, _type) \ talloc_get_type_abort(_tevent_req_data(_req), _type) #endif /** * @brief The print function which can be set for a tevent async request. * * @param[in] req The tevent async request. * * @param[in] ctx A talloc memory context which can be used to allocate * memory. * * @return An allocated string buffer to print. * * Example: * @code * static char *my_print(struct tevent_req *req, TALLOC_CTX *mem_ctx) * { * struct my_data *data = tevent_req_data(req, struct my_data); * char *result; * * result = tevent_req_default_print(req, mem_ctx); * if (result == NULL) { * return NULL; * } * * return talloc_asprintf_append_buffer(result, "foo=%d, bar=%d", * data->foo, data->bar); * } * @endcode */ typedef char *(*tevent_req_print_fn)(struct tevent_req *req, TALLOC_CTX *ctx); /** * @brief This function sets a print function for the given request. * * This function can be used to set up a print function for the given request. * This will be triggered if the tevent_req_print() function was * called on the given request. * * @param[in] req The request to use. * * @param[in] fn A pointer to the print function * * @note This function should only be used for debugging. */ void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn); /** * @brief The default print function for creating debug messages. * * The function should not be used by users of the async API, * but a custom print function can use it and append custom text * to the string. * * @param[in] req The request to be printed. * * @param[in] mem_ctx The memory context for the result. * * @return Text representation of request.
* */ char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx); /** * @brief Print an tevent_req structure in debug messages. * * This function should be used by callers of the async API. * * @param[in] mem_ctx The memory context for the result. * * @param[in] req The request to be printed. * * @return Text representation of request. */ char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req); /** * @brief A typedef for a cancel function for a tevent request. * * @param[in] req The tevent request calling this function. * * @return True if the request could be canceled, false if not. */ typedef bool (*tevent_req_cancel_fn)(struct tevent_req *req); /** * @brief This function sets a cancel function for the given tevent request. * * This function can be used to setup a cancel function for the given request. * This will be triggered if the tevent_req_cancel() function was * called on the given request. * * @param[in] req The request to use. * * @param[in] fn A pointer to the cancel function. */ void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn); #ifdef DOXYGEN /** * @brief Try to cancel the given tevent request. * * This function can be used to cancel the given request. * * It is only possible to cancel a request when the implementation * has registered a cancel function via the tevent_req_set_cancel_fn(). * * @param[in] req The request to use. * * @return This function returns true if the request is * cancelable, otherwise false is returned. * * @note Even if the function returns true, the caller need to wait * for the function to complete normally. * Only the _recv() function of the given request indicates * if the request was really canceled. */ bool tevent_req_cancel(struct tevent_req *req); #else bool _tevent_req_cancel(struct tevent_req *req, const char *location); #define tevent_req_cancel(req) \ _tevent_req_cancel(req, __location__) #endif /** * @brief A typedef for a cleanup function for a tevent request. * * @param[in] req The tevent request calling this function. * * @param[in] req_state The current tevent_req_state. * */ typedef void (*tevent_req_cleanup_fn)(struct tevent_req *req, enum tevent_req_state req_state); /** * @brief This function sets a cleanup function for the given tevent request. * * This function can be used to setup a cleanup function for the given request. * This will be triggered when the tevent_req_done() or tevent_req_error() * function was called, before notifying the callers callback function, * and also before scheduling the deferred trigger. * * This might be useful if more than one tevent_req belong together * and need to finish both requests at the same time. * * The cleanup function is able to call tevent_req_done() or tevent_req_error() * recursively, the cleanup function is only triggered the first time. * * The cleanup function is also called by tevent_req_received() * (possibly triggered from tevent_req_destructor()) before destroying * the private data of the tevent_req. * * @param[in] req The request to use. * * @param[in] fn A pointer to the cancel function. */ void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn); #ifdef DOXYGEN /** * @brief Create an async tevent request. * * The new async request will be initialized in state TEVENT_REQ_IN_PROGRESS. 
* * @code * struct tevent_req *req; * struct computation_state *state; * req = tevent_req_create(mem_ctx, &state, struct computation_state); * @endcode * * Tevent_req_create() allocates and zeros the state variable as a talloc * child of its result. The state variable should be used as the talloc * parent for all temporary variables that are allocated during the async * computation. This way, when the user of the async computation frees * the request, the state as a talloc child will be free'd along with * all the temporary variables hanging off the state. * * @param[in] mem_ctx The memory context for the result. * @param[in] pstate Pointer to the private request state. * @param[in] type The name of the request. * * @return A new async request. NULL on error. */ struct tevent_req *tevent_req_create(TALLOC_CTX *mem_ctx, void **pstate, #type); #else struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx, void *pstate, size_t state_size, const char *type, const char *location); #define tevent_req_create(_mem_ctx, _pstate, _type) \ _tevent_req_create((_mem_ctx), (_pstate), sizeof(_type), \ #_type, __location__) #endif /** * @brief Set a timeout for an async request. On failure, "req" is already * set to state TEVENT_REQ_NO_MEMORY. * * @param[in] req The request to set the timeout for. * * @param[in] ev The event context to use for the timer. * * @param[in] endtime The endtime of the request. * * @return True if succeeded, false if not. */ bool tevent_req_set_endtime(struct tevent_req *req, struct tevent_context *ev, struct timeval endtime); /** * @brief Reset the timer set by tevent_req_set_endtime. * * @param[in] req The request to reset the timeout for */ void tevent_req_reset_endtime(struct tevent_req *req); #ifdef DOXYGEN /** * @brief Call the notify callback of the given tevent request manually. * * @param[in] req The tevent request to call the notify function from. * * @see tevent_req_set_callback() */ void tevent_req_notify_callback(struct tevent_req *req); #else void _tevent_req_notify_callback(struct tevent_req *req, const char *location); #define tevent_req_notify_callback(req) \ _tevent_req_notify_callback(req, __location__) #endif #ifdef DOXYGEN /** * @brief An async request has successfully finished. * * This function is to be used by implementors of async requests. When a * request is successfully finished, this function calls the user's completion * function. * * @param[in] req The finished request. */ void tevent_req_done(struct tevent_req *req); #else void _tevent_req_done(struct tevent_req *req, const char *location); #define tevent_req_done(req) \ _tevent_req_done(req, __location__) #endif #ifdef DOXYGEN /** * @brief An async request has seen an error. * * This function is to be used by implementors of async requests. When a * request can not successfully completed, the implementation should call this * function with the appropriate status code. * * If error is 0 the function returns false and does nothing more. * * @param[in] req The request with an error. * * @param[in] error The error code. * * @return On success true is returned, false if error is 0. 
* * @code * int error = first_function(); * if (tevent_req_error(req, error)) { * return; * } * * error = second_function(); * if (tevent_req_error(req, error)) { * return; * } * * tevent_req_done(req); * return; * @endcode */ bool tevent_req_error(struct tevent_req *req, uint64_t error); #else bool _tevent_req_error(struct tevent_req *req, uint64_t error, const char *location); #define tevent_req_error(req, error) \ _tevent_req_error(req, error, __location__) #endif #ifdef DOXYGEN /** * @brief Helper function for nomem check. * * Convenience helper to easily check alloc failure within a callback * implementing the next step of an async request. * * @param[in] p The pointer to be checked. * * @param[in] req The request being processed. * * @code * p = talloc(mem_ctx, bla); * if (tevent_req_nomem(p, req)) { * return; * } * @endcode */ bool tevent_req_nomem(const void *p, struct tevent_req *req); #else bool _tevent_req_nomem(const void *p, struct tevent_req *req, const char *location); #define tevent_req_nomem(p, req) \ _tevent_req_nomem(p, req, __location__) #endif #ifdef DOXYGEN /** * @brief Indicate out of memory to a request * * @param[in] req The request being processed. */ void tevent_req_oom(struct tevent_req *req); #else void _tevent_req_oom(struct tevent_req *req, const char *location); #define tevent_req_oom(req) \ _tevent_req_oom(req, __location__) #endif /** * @brief Finish a request before the caller had a chance to set the callback. * * An implementation of an async request might find that it can either finish * the request without waiting for an external event, or it can not even start * the engine. To present the illusion of a callback to the user of the API, * the implementation can call this helper function which triggers an * immediate event. This way the caller can use the same calling * conventions, independent of whether the request was actually deferred. * * @code * struct tevent_req *computation_send(TALLOC_CTX *mem_ctx, * struct tevent_context *ev) * { * struct tevent_req *req, *subreq; * struct computation_state *state; * req = tevent_req_create(mem_ctx, &state, struct computation_state); * if (req == NULL) { * return NULL; * } * subreq = subcomputation_send(state, ev); * if (tevent_req_nomem(subreq, req)) { * return tevent_req_post(req, ev); * } * tevent_req_set_callback(subreq, computation_done, req); * return req; * } * @endcode * * @param[in] req The finished request. * * @param[in] ev The tevent_context for the immediate event. * * @return The given request will be returned. */ struct tevent_req *tevent_req_post(struct tevent_req *req, struct tevent_context *ev); /** * @brief Finish multiple requests within one function * * Normally tevent_req_notify_callback() and all wrappers * (e.g. tevent_req_done() and tevent_req_error()) * need to be the last thing an event handler should call. * This is because the callback is likely to destroy the * context of the current function. * * If a function wants to notify more than one caller, * it is dangerous if it just triggers multiple callbacks * in a row. With tevent_req_defer_callback() it is possible * to set an event context that will be used to defer the callback * via an immediate event (similar to tevent_req_post()). 
* * @code * struct complete_state { * struct tevent_context *ev; * * struct tevent_req **reqs; * }; * * void complete(struct complete_state *state) * { * size_t i, c = talloc_array_length(state->reqs); * * for (i=0; i < c; i++) { * tevent_req_defer_callback(state->reqs[i], state->ev); * tevent_req_done(state->reqs[i]); * } * } * @endcode * * @param[in] req The finished request. * * @param[in] ev The tevent_context for the immediate event. * * @return The given request will be returned. */ void tevent_req_defer_callback(struct tevent_req *req, struct tevent_context *ev); /** * @brief Check if the given request is still in progress. * * It is typically used by sync wrapper functions. * * @param[in] req The request to poll. * * @return The boolean form of "is in progress". */ bool tevent_req_is_in_progress(struct tevent_req *req); /** * @brief Actively poll for the given request to finish. * * This function is typically used by sync wrapper functions. * * @param[in] req The request to poll. * * @param[in] ev The tevent_context to be used. * * @return On success true is returned. If a critical error has * happened in the tevent loop layer false is returned. * This is not the return value of the given request! * * @note This should only be used if the given tevent context was created by the * caller, to avoid event loop nesting. * * @code * req = tstream_writev_queue_send(mem_ctx, * ev_ctx, * tstream, * send_queue, * iov, 2); * ok = tevent_req_poll(req, tctx->ev); * rc = tstream_writev_queue_recv(req, &sys_errno); * TALLOC_FREE(req); * @endcode */ bool tevent_req_poll(struct tevent_req *req, struct tevent_context *ev); /** * @brief Get the tevent request state and the actual error set by * tevent_req_error. * * @code * int computation_recv(struct tevent_req *req, uint64_t *perr) * { * enum tevent_req_state state; * uint64_t err; * if (tevent_req_is_error(req, &state, &err)) { * *perr = err; * return -1; * } * return 0; * } * @endcode * * @param[in] req The tevent request to get the error from. * * @param[out] state A pointer to store the tevent request error state. * * @param[out] error A pointer to store the error set by tevent_req_error(). * * @return True if the function could set error and state, false * otherwise. * * @see tevent_req_error() */ bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state, uint64_t *error); /** * @brief Use as the last action of a _recv() function. * * This function destroys the attached private data. * * @param[in] req The finished request. */ void tevent_req_received(struct tevent_req *req); /** * @brief Mark a tevent_req for profiling * * This will turn on profiling for this tevent_req an all subreqs that * are directly started as helper requests off this * tevent_req. subreqs are chained by walking up the talloc_parent * hierarchy at a subreq's tevent_req_create. This means to get the * profiling chain right the subreq that needs to be profiled as part * of this tevent_req's profile must be a talloc child of the requests * state variable. 
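 *
 * A minimal sketch of how profiling could be used (computation_send() and
 * the surrounding variables are illustrative, not part of this header):
 * @code
 * struct tevent_req_profile *profile;
 * const char *req_name = NULL;
 *
 * req = computation_send(mem_ctx, ev);
 * if (!tevent_req_set_profile(req)) {
 *      return NULL;
 * }
 * ...
 * profile = tevent_req_move_profile(req, mem_ctx);
 * tevent_req_profile_get_name(profile, &req_name);
 * @endcode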
 * * @param[in] req The request to enable profiling for * * @return False if the profile could not be activated */ bool tevent_req_set_profile(struct tevent_req *req); struct tevent_req_profile; /** * @brief Get a request's profile for inspection * * @param[in] req The request to get the profile from * * @return The request's profile */ const struct tevent_req_profile *tevent_req_get_profile( struct tevent_req *req); /** * @brief Move the profile out of a request * * This function detaches the request's profile from the request, so * that the profile can outlive the request in a _recv function. * * @param[in] req The request to move the profile out of * @param[in] mem_ctx The new talloc context for the profile * * @return The moved profile */ struct tevent_req_profile *tevent_req_move_profile(struct tevent_req *req, TALLOC_CTX *mem_ctx); /** * @brief Get a profile description * * @param[in] profile The profile to be queried * @param[out] req_name The name of the request (state's name) * * "req_name" after this call is still in talloc-possession of "profile" */ void tevent_req_profile_get_name(const struct tevent_req_profile *profile, const char **req_name); /** * @brief Get a profile's start event data * * @param[in] profile The profile to be queried * @param[out] start_location The location where this event started * @param[out] start_time The time this event started * * "start_location" after this call is still in talloc-possession of "profile" */ void tevent_req_profile_get_start(const struct tevent_req_profile *profile, const char **start_location, struct timeval *start_time); /** * @brief Get a profile's stop event data * * @param[in] profile The profile to be queried * @param[out] stop_location The location where this event stopped * @param[out] stop_time The time this event stopped * * "stop_location" after this call is still in talloc-possession of "profile" */ void tevent_req_profile_get_stop(const struct tevent_req_profile *profile, const char **stop_location, struct timeval *stop_time); /** * @brief Get a profile's result data * * @param[in] profile The profile to be queried * @param[out] pid The process where this profile was taken * @param[out] state The status the profile's tevent_req finished with * @param[out] user_error The user error of the profile's tevent_req */ void tevent_req_profile_get_status(const struct tevent_req_profile *profile, pid_t *pid, enum tevent_req_state *state, uint64_t *user_error); /** * @brief Retrieve the first subreq's profile from a profile * * @param[in] profile The profile to query * * @return The first tevent subreq's profile */ const struct tevent_req_profile *tevent_req_profile_get_subprofiles( const struct tevent_req_profile *profile); /** * @brief Walk the chain of subreqs * * @param[in] profile The subreq's profile to walk * * @return The next subprofile in the list */ const struct tevent_req_profile *tevent_req_profile_next( const struct tevent_req_profile *profile); /** * @brief Create a fresh tevent_req_profile * * @param[in] mem_ctx The talloc context to hang the fresh struct off * * @return The fresh struct */ struct tevent_req_profile *tevent_req_profile_create(TALLOC_CTX *mem_ctx); /** * @brief Set a profile's name * * @param[in] profile The profile to set the name for * @param[in] name The new name for the profile * * @return True if the internal talloc_strdup succeeded */ bool tevent_req_profile_set_name(struct tevent_req_profile *profile, const char *name); /** * @brief Set a profile's start event * * @param[in] profile The profile to set the start data for * @param[in] start_location The
new start location * @param[in] start_time The new start time * * @return True if the internal talloc_strdup succeeded */ bool tevent_req_profile_set_start(struct tevent_req_profile *profile, const char *start_location, struct timeval start_time); /** * @brief Set a profile's stop event * * @param[in] profile The profile to set the stop data for * @param[in] stop_location The new stop location * @param[in] stop_time The new stop time * * @return True if the internal talloc_strdup succeeded */ bool tevent_req_profile_set_stop(struct tevent_req_profile *profile, const char *stop_location, struct timeval stop_time); /** * @brief Set a profile's exit status * * @param[in] profile The profile to set the exit status for * @param[in] pid The process where this profile was taken * @param[in] state The status the profile's tevent_req finished with * @param[in] user_error The user error of the profile's tevent_req */ void tevent_req_profile_set_status(struct tevent_req_profile *profile, pid_t pid, enum tevent_req_state state, uint64_t user_error); /** * @brief Add a subprofile to a profile * * @param[in] parent_profile The profile to be modified * @param[in] sub_profile The subreq's profile to be added * * "sub_profile" is talloc_move'ed into "parent_profile", so the talloc * ownership of "sub_profile" changes */ void tevent_req_profile_append_sub(struct tevent_req_profile *parent_profile, struct tevent_req_profile **sub_profile); /** * @brief Create a tevent subrequest at a given time. * * The idea is to always use the same syntax for tevent requests. * * @param[in] mem_ctx The talloc memory context to use. * * @param[in] ev The event handle to set up the request. * * @param[in] wakeup_time The time to wake up and execute the request. * * @return The new subrequest, NULL on error. * * Example: * @code * static void my_callback_wakeup_done(struct tevent_req *subreq) * { * struct tevent_req *req = tevent_req_callback_data(subreq, * struct tevent_req); * bool ok; * * ok = tevent_wakeup_recv(subreq); * TALLOC_FREE(subreq); * if (!ok) { * tevent_req_error(req, -1); * return; * } * ... * } * @endcode * * @code * subreq = tevent_wakeup_send(mem_ctx, ev, wakeup_time); * if (tevent_req_nomem(subreq, req)) { * return false; * } * tevent_req_set_callback(subreq, my_callback_wakeup_done, req); * @endcode * * @see tevent_wakeup_recv() */ struct tevent_req *tevent_wakeup_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct timeval wakeup_time); /** * @brief Check if the wakeup has been correctly executed. * * This function needs to be called in the callback function set after calling * tevent_wakeup_send(). * * @param[in] req The tevent request to check. * * @return True on success, false otherwise. * * @see tevent_wakeup_send() */ bool tevent_wakeup_recv(struct tevent_req *req); /* @} */ /** * @defgroup tevent_helpers The tevent helper functions * @ingroup tevent * * @todo description * * @{ */ /** * @brief Compare two timeval values. * * @param[in] tv1 The first timeval value to compare. * * @param[in] tv2 The second timeval value to compare. * * @return 0 if they are equal. * 1 if the first time is greater than the second. * -1 if the first time is smaller than the second. */ int tevent_timeval_compare(const struct timeval *tv1, const struct timeval *tv2); /** * @brief Get a zero timeval value. * * @return A zero timeval value. */ struct timeval tevent_timeval_zero(void); /** * @brief Get a timeval value for the current time. * * @return A timeval value with the current time.
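 *
 * For example (a small sketch; the variable names are illustrative), a
 * deadline five seconds from now can be computed as:
 * @code
 * struct timeval now = tevent_timeval_current();
 * struct timeval deadline = tevent_timeval_add(&now, 5, 0);
 * @endcode
 * The same value can also be obtained directly via
 * tevent_timeval_current_ofs(5, 0).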
*/ struct timeval tevent_timeval_current(void); /** * @brief Get a timeval structure with the given values. * * @param[in] secs The seconds to set. * * @param[in] usecs The microseconds to set. * * @return A timeval structure with the given values. */ struct timeval tevent_timeval_set(uint32_t secs, uint32_t usecs); /** * @brief Get the difference between two timeval values. * * @param[in] tv1 The first timeval. * * @param[in] tv2 The second timeval. * * @return A timeval structure with the difference between the * first and the second value. */ struct timeval tevent_timeval_until(const struct timeval *tv1, const struct timeval *tv2); /** * @brief Check if a given timeval structure is zero. * * @param[in] tv The timeval to check if it is zero. * * @return True if it is zero, false otherwise. */ bool tevent_timeval_is_zero(const struct timeval *tv); /** * @brief Add the given amount of time to a timeval structure. * * @param[in] tv The timeval structure to add the time. * * @param[in] secs The seconds to add to the timeval. * * @param[in] usecs The microseconds to add to the timeval. * * @return The timeval structure with the new time. */ struct timeval tevent_timeval_add(const struct timeval *tv, uint32_t secs, uint32_t usecs); /** * @brief Get a timeval in the future with a specified offset from now. * * @param[in] secs The seconds of the offset from now. * * @param[in] usecs The microseconds of the offset from now. * * @return A timeval with the given offset in the future. */ struct timeval tevent_timeval_current_ofs(uint32_t secs, uint32_t usecs); /* @} */ /** * @defgroup tevent_queue The tevent queue functions * @ingroup tevent * * A tevent_queue is used to queue up async requests that must be * serialized. For example writing buffers into a socket must be * serialized. Writing a large lump of data into a socket can require * multiple write(2) or send(2) system calls. If more than one async * request is outstanding to write large buffers into a socket, every * request must individually be completed before the next one begins, * even if multiple syscalls are required. * * Take a look at @ref tevent_queue_tutorial for more details. * @{ */ struct tevent_queue; struct tevent_queue_entry; #ifdef DOXYGEN /** * @brief Create and start a tevent queue. * * @param[in] mem_ctx The talloc memory context to allocate the queue. * * @param[in] name The name to use to identify the queue. * * @return An allocated tevent queue on success, NULL on error. * * @see tevent_queue_start() * @see tevent_queue_stop() */ struct tevent_queue *tevent_queue_create(TALLOC_CTX *mem_ctx, const char *name); #else struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx, const char *name, const char *location); #define tevent_queue_create(_mem_ctx, _name) \ _tevent_queue_create((_mem_ctx), (_name), __location__) #endif /** * @brief A callback trigger function run by the queue. * * @param[in] req The tevent request the trigger function is executed on. * * @param[in] private_data The private data pointer specified by * tevent_queue_add(). * * @see tevent_queue_add() * @see tevent_queue_add_entry() * @see tevent_queue_add_optimize_empty() */ typedef void (*tevent_queue_trigger_fn_t)(struct tevent_req *req, void *private_data); /** * @brief Add a tevent request to the queue. * * @param[in] queue The queue to add the request. * * @param[in] ev The event handle to use for the request. * * @param[in] req The tevent request to add to the queue. 
* * @param[in] trigger The function triggered by the queue when the request * is called. Since tevent 0.9.14 it's possible to * pass NULL, in order to just add a "blocker" to the * queue. * * @param[in] private_data The private data passed to the trigger function. * * @return True if the request has been successfully added, false * otherwise. */ bool tevent_queue_add(struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data); /** * @brief Add a tevent request to the queue. * * The request can be removed from the queue by calling talloc_free() * (or a similar function) on the returned queue entry. This * is the only difference to tevent_queue_add(). * * @param[in] queue The queue to add the request. * * @param[in] ev The event handle to use for the request. * * @param[in] req The tevent request to add to the queue. * * @param[in] trigger The function triggered by the queue when the request * is called. Since tevent 0.9.14 it's possible to * pass NULL, in order to just add a "blocker" to the * queue. * * @param[in] private_data The private data passed to the trigger function. * * @return a pointer to the tevent_queue_entry if the request * has been successfully added, NULL otherwise. * * @see tevent_queue_add() * @see tevent_queue_add_optimize_empty() */ struct tevent_queue_entry *tevent_queue_add_entry( struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data); /** * @brief Add a tevent request to the queue using a possible optimization. * * This tries to optimize for the empty queue case and may calls * the trigger function directly. This is the only difference compared * to tevent_queue_add_entry(). * * The caller needs to be prepared that the trigger function has * already called tevent_req_notify_callback(), tevent_req_error(), * tevent_req_done() or a similar function. * * The trigger function has no chance to see the returned * queue_entry in the optimized case. * * The request can be removed from the queue by calling talloc_free() * (or a similar function) on the returned queue entry. * * @param[in] queue The queue to add the request. * * @param[in] ev The event handle to use for the request. * * @param[in] req The tevent request to add to the queue. * * @param[in] trigger The function triggered by the queue when the request * is called. Since tevent 0.9.14 it's possible to * pass NULL, in order to just add a "blocker" to the * queue. * * @param[in] private_data The private data passed to the trigger function. * * @return a pointer to the tevent_queue_entry if the request * has been successfully added, NULL otherwise. * * @see tevent_queue_add() * @see tevent_queue_add_entry() */ struct tevent_queue_entry *tevent_queue_add_optimize_empty( struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data); /** * @brief Untrigger an already triggered queue entry. * * If a trigger function detects that it needs to remain * in the queue, it needs to call tevent_queue_stop() * followed by tevent_queue_entry_untrigger(). * * @note In order to call tevent_queue_entry_untrigger() * the queue must be already stopped and the given queue_entry * must be the first one in the queue! Otherwise it calls abort(). 
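 *
 * A possible pattern (a sketch only; my_trigger, struct my_state and
 * resource_ready() are illustrative assumptions), where the caller stored
 * the entry returned by tevent_queue_add_entry() in its private data:
 * @code
 * static void my_trigger(struct tevent_req *req, void *private_data)
 * {
 *      struct my_state *s = talloc_get_type_abort(private_data,
 *                                                 struct my_state);
 *
 *      if (!resource_ready(s)) {
 *              tevent_queue_stop(s->queue);
 *              tevent_queue_entry_untrigger(s->entry);
 *              return;
 *      }
 *      ...
 * }
 * @endcode
 * In the not-ready branch the entry stays at the head of the queue until
 * the queue is started again.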
 * * @note You can't use this together with tevent_queue_add_optimize_empty() * because the trigger function doesn't have access to the queue entry * in the case of an empty queue. * * @param[in] entry The queue entry to rearm. * * @see tevent_queue_add_entry() * @see tevent_queue_stop() */ void tevent_queue_entry_untrigger(struct tevent_queue_entry *entry); /** * @brief Start a tevent queue. * * The queue is started by default. * * @param[in] queue The queue to start. */ void tevent_queue_start(struct tevent_queue *queue); /** * @brief Stop a tevent queue. * * The queue is started by default. * * @param[in] queue The queue to stop. */ void tevent_queue_stop(struct tevent_queue *queue); /** * @brief Get the length of the queue. * * @param[in] queue The queue to get the length from. * * @return The number of elements. */ size_t tevent_queue_length(struct tevent_queue *queue); /** * @brief Check whether the tevent queue is running. * * The queue is started by default. * * @param[in] queue The queue. * * @return Whether the queue is running or not. */ bool tevent_queue_running(struct tevent_queue *queue); /** * @brief Create a tevent subrequest that waits in a tevent_queue * * The idea is to always use the same syntax for tevent requests. * * @param[in] mem_ctx The talloc memory context to use. * * @param[in] ev The event handle to set up the request. * * @param[in] queue The queue to wait in. * * @return The new subrequest, NULL on error. * * @see tevent_queue_wait_recv() */ struct tevent_req *tevent_queue_wait_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct tevent_queue *queue); /** * @brief Check if we no longer need to wait in the queue. * * This function needs to be called in the callback function set after calling * tevent_queue_wait_send(). * * @param[in] req The tevent request to check. * * @return True on success, false otherwise. * * @see tevent_queue_wait_send() */ bool tevent_queue_wait_recv(struct tevent_req *req); typedef int (*tevent_nesting_hook)(struct tevent_context *ev, void *private_data, uint32_t level, bool begin, void *stack_ptr, const char *location); /** * @brief Create a tevent_thread_proxy for message passing between threads. * * The tevent_context must have been allocated on the NULL * talloc context, and talloc_disable_null_tracking() must * have been called. * * @param[in] dest_ev_ctx The tevent_context to receive events. * * @return An allocated tevent_thread_proxy, NULL on error. * If tevent was compiled without PTHREAD support * NULL is always returned and errno set to ENOSYS. * * @see tevent_thread_proxy_schedule() */ struct tevent_thread_proxy *tevent_thread_proxy_create( struct tevent_context *dest_ev_ctx); /** * @brief Schedule an immediate event on an event context from another thread. * * Causes dest_ev_ctx, being run by another thread, to receive an * immediate event calling the handler with the *pp_private_data parameter. * * *pp_im must be a pointer to an immediate event talloced on a context owned * by the calling thread, or the NULL context. Ownership will * be transferred to the tevent_thread_proxy and *pp_im will be returned as NULL. * * *pp_private_data must be a talloced area of memory with no destructors. * Ownership of this memory will be transferred to the tevent library and * *pp_private_data will be set to NULL on successful completion of * the call. Set pp_private_data to NULL if no parameter transfer is * needed (a pure callback). This is an asynchronous request; the caller * does not wait for the callback to be completed before returning.
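 *
 * A rough sketch of the worker-thread side (proxy, my_done_handler and the
 * message contents are illustrative assumptions, not part of this header):
 * @code
 * struct tevent_immediate *im = tevent_create_immediate(NULL);
 * char *msg = talloc_strdup(NULL, "work finished");
 *
 * tevent_thread_proxy_schedule(proxy, &im, my_done_handler, &msg);
 * @endcode
 * After the call both im and msg have been set to NULL; ownership has moved
 * to the thread running dest_ev_ctx.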
 * * @param[in] tp The tevent_thread_proxy to use. * * @param[in] pp_im Pointer to immediate event pointer. * * @param[in] handler The function that will be called. * * @param[in] pp_private_data The talloced memory to transfer. * * @see tevent_thread_proxy_create() */ void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp, struct tevent_immediate **pp_im, tevent_immediate_handler_t handler, void *pp_private_data); /** * @brief Create a context for threaded activation of immediates * * A tevent_threaded_context provides a link into an event * context. Using tevent_threaded_schedule_immediate, it is possible * to activate an immediate event from within a thread. * * It is the duty of the caller of tevent_threaded_context_create() to * keep the event context around longer than any * tevent_threaded_context. tevent will abort if ev is talloc_free'ed * with an active tevent_threaded_context. * * If tevent is built without pthread support, this always returns * NULL with errno=ENOSYS. * * @param[in] mem_ctx The talloc memory context to use. * @param[in] ev The event context to link this to. * @return The threaded context, or NULL with errno set. * * @see tevent_threaded_schedule_immediate() * * @note Available as of tevent 0.9.30 */ struct tevent_threaded_context *tevent_threaded_context_create( TALLOC_CTX *mem_ctx, struct tevent_context *ev); #ifdef DOXYGEN /** * @brief Activate an immediate from a thread * * Activate an immediate from within a thread. * * This routine does not watch out for talloc hierarchies. This means * that it is highly recommended to create the tevent_immediate in the * thread owning tctx, allocate a threaded job description for the * thread, hand over both pointers to a helper thread and not touch it * in the main thread at all anymore. * * tevent_threaded_schedule_immediate is intended as a job completion * indicator for simple threaded helpers. * * Please be aware that tevent_threaded_schedule_immediate is very * picky about its arguments: An immediate may not already be * activated and the handler must exist. With * tevent_threaded_schedule_immediate memory ownership is transferred * to the main thread holding the tevent context behind tctx; the * helper thread can't access it anymore.
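 *
 * As an illustrative sketch (the job pointer and the job_done handler are
 * assumptions, not part of this header): the main thread creates the
 * threaded context and an immediate, and the helper thread activates the
 * immediate once the job is finished:
 * @code
 * struct tevent_threaded_context *tctx =
 *      tevent_threaded_context_create(mem_ctx, ev);
 * struct tevent_immediate *im = tevent_create_immediate(job);
 *
 * tevent_threaded_schedule_immediate(tctx, im, job_done, job);
 * @endcode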
 * * @param[in] tctx The threaded context to go through * @param[in] im The immediate event to activate * @param[in] handler The immediate handler to call in the main thread * @param[in] private_data Pointer for the immediate handler * * @see tevent_threaded_context_create() * * @note Available as of tevent 0.9.30 */ void tevent_threaded_schedule_immediate(struct tevent_threaded_context *tctx, struct tevent_immediate *im, tevent_immediate_handler_t handler, void *private_data); #else void _tevent_threaded_schedule_immediate(struct tevent_threaded_context *tctx, struct tevent_immediate *im, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location); #define tevent_threaded_schedule_immediate(tctx, im, handler, private_data) \ _tevent_threaded_schedule_immediate(tctx, im, handler, private_data, \ #handler, __location__); #endif #ifdef TEVENT_DEPRECATED #ifndef _DEPRECATED_ #if __has_attribute(deprecated) || (__GNUC__ >= 3) #define _DEPRECATED_ __attribute__ ((deprecated)) #else #define _DEPRECATED_ #endif #endif void tevent_loop_allow_nesting(struct tevent_context *ev) _DEPRECATED_; void tevent_loop_set_nesting_hook(struct tevent_context *ev, tevent_nesting_hook hook, void *private_data) _DEPRECATED_; int _tevent_loop_until(struct tevent_context *ev, bool (*finished)(void *private_data), void *private_data, const char *location) _DEPRECATED_; #define tevent_loop_until(ev, finished, private_data) \ _tevent_loop_until(ev, finished, private_data, __location__) #endif int tevent_re_initialise(struct tevent_context *ev); /* @} */ /** * @defgroup tevent_ops The tevent operation functions * @ingroup tevent * * The following structure and registration functions are exclusively * needed for people writing and plugging in a different event engine. * There is nothing useful for a normal tevent user in here.
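 *
 * As a very rough sketch (all my_* names are illustrative; a real backend
 * has to implement every hook of struct tevent_ops below), a custom backend
 * would be registered like this:
 * @code
 * static const struct tevent_ops my_backend_ops = {
 *      .context_init       = my_context_init,
 *      .add_fd             = my_add_fd,
 *      .set_fd_close_fn    = my_set_fd_close_fn,
 *      .get_fd_flags       = my_get_fd_flags,
 *      .set_fd_flags       = my_set_fd_flags,
 *      .add_timer          = my_add_timer,
 *      .schedule_immediate = my_schedule_immediate,
 *      .add_signal         = my_add_signal,
 *      .loop_once          = my_loop_once,
 *      .loop_wait          = my_loop_wait,
 * };
 *
 * bool ok = tevent_register_backend("my_backend", &my_backend_ops);
 * @endcode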
* @{ */ struct tevent_ops { /* context init */ int (*context_init)(struct tevent_context *ev); /* fd_event functions */ struct tevent_fd *(*add_fd)(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location); void (*set_fd_close_fn)(struct tevent_fd *fde, tevent_fd_close_fn_t close_fn); uint16_t (*get_fd_flags)(struct tevent_fd *fde); void (*set_fd_flags)(struct tevent_fd *fde, uint16_t flags); /* timed_event functions */ struct tevent_timer *(*add_timer)(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location); /* immediate event functions */ void (*schedule_immediate)(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location); /* signal functions */ struct tevent_signal *(*add_signal)(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location); /* loop functions */ int (*loop_once)(struct tevent_context *ev, const char *location); int (*loop_wait)(struct tevent_context *ev, const char *location); }; bool tevent_register_backend(const char *name, const struct tevent_ops *ops); /* @} */ #ifdef TEVENT_DEPRECATED /** * @defgroup tevent_wrapper_ops The tevent wrapper operation functions * @ingroup tevent * * The following structure and registration functions are exclusively * needed for people writing wrapper functions for event handlers * e.g. wrappers can be used for debugging/profiling or impersonation. * * There is nothing useful for normal tevent user in here. * * @note That the close_fn() on tevent_fd is *NOT* wrapped! 
* * @see tevent_context_wrapper_create * @see tevent_fd_set_auto_close * @{ */ struct tevent_wrapper_ops { const char *name; bool (*before_use)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, const char *location); void (*after_use)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, const char *location); void (*before_fd_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_fd *fde, uint16_t flags, const char *handler_name, const char *location); void (*after_fd_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_fd *fde, uint16_t flags, const char *handler_name, const char *location); void (*before_timer_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_timer *te, struct timeval requested_time, struct timeval trigger_time, const char *handler_name, const char *location); void (*after_timer_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_timer *te, struct timeval requested_time, struct timeval trigger_time, const char *handler_name, const char *location); void (*before_immediate_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_immediate *im, const char *handler_name, const char *location); void (*after_immediate_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_immediate *im, const char *handler_name, const char *location); void (*before_signal_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_signal *se, int signum, int count, void *siginfo, const char *handler_name, const char *location); void (*after_signal_handler)(struct tevent_context *wrap_ev, void *private_state, struct tevent_context *main_ev, struct tevent_signal *se, int signum, int count, void *siginfo, const char *handler_name, const char *location); }; #ifdef DOXYGEN /** * @brief Create a wrapper tevent_context. * * @param[in] main_ev The main event context to work on. * * @param[in] mem_ctx The talloc memory context to use. * * @param[in] ops The tevent_wrapper_ops function table. * * @param[out] private_state The private state use by the wrapper functions. * * @param[in] private_type The talloc type of the private_state. * * @return The wrapper event context, NULL on error. * * @note Available as of tevent 0.9.37 * @note Deprecated as of tevent 0.9.38 */ struct tevent_context *tevent_context_wrapper_create(struct tevent_context *main_ev, TALLOC_CTX *mem_ctx, const struct tevent_wrapper_ops *ops, void **private_state, const char *private_type); #else struct tevent_context *_tevent_context_wrapper_create(struct tevent_context *main_ev, TALLOC_CTX *mem_ctx, const struct tevent_wrapper_ops *ops, void *pstate, size_t psize, const char *type, const char *location) _DEPRECATED_; #define tevent_context_wrapper_create(main_ev, mem_ctx, ops, state, type) \ _tevent_context_wrapper_create(main_ev, mem_ctx, ops, \ state, sizeof(type), #type, __location__) #endif /** * @brief Check if the event context is a wrapper event context. * * @param[in] ev The event context to work on. * * @return Is a wrapper (true), otherwise (false). 
* * @see tevent_context_wrapper_create() * * @note Available as of tevent 0.9.37 * @note Deprecated as of tevent 0.9.38 */ bool tevent_context_is_wrapper(struct tevent_context *ev) _DEPRECATED_; #ifdef DOXYGEN /** * @brief Prepare the environment of a (wrapper) event context. * * A caller might call this before passing a wrapper event context * to a tevent_req based *_send() function. * * The wrapper event context might do something like impersonation. * * tevent_context_push_use() must always be used in combination * with tevent_context_pop_use(). * * There is a global stack of currently active/busy wrapper event contexts. * Each wrapper can only appear once on that global stack! * The stack size is limited to 32 elements, which should be enough * for all useful scenarios. * * In addition to an explicit tevent_context_push_use(), the * invocation of an immediate, timer or fd handler also implicitly * pushes the wrapper onto the stack. * * Therefore there are some strict constraints for the usage of * tevent_context_push_use(): * - It must not be called from within an event handler * that already acts on the wrapper. * - tevent_context_pop_use() must be called before * leaving the code block that called tevent_context_push_use(). * - The caller is responsible for ensuring the correct stack ordering. * - Any violation of these constraints results in calling * the abort handler of the given tevent context. * * Calling tevent_context_push_use() on a raw event context * still consumes an element on the stack, but it's otherwise * a no-op. * * If tevent_context_push_use() returns false, it means * that the wrapper's before_use() hook reported a failure; * in that case you must not call tevent_context_pop_use(), as * the wrapper was not pushed onto the stack. * * @param[in] ev The event context to work on. * * @return Success (true) or failure (false). * * @note This is only needed if wrapper event contexts are in use. * * @see tevent_context_pop_use * * @note Available as of tevent 0.9.37 * @note Deprecated as of tevent 0.9.38 */ bool tevent_context_push_use(struct tevent_context *ev); #else bool _tevent_context_push_use(struct tevent_context *ev, const char *location) _DEPRECATED_; #define tevent_context_push_use(ev) \ _tevent_context_push_use(ev, __location__) #endif #ifdef DOXYGEN /** * @brief Release the environment of a (wrapper) event context. * * The wrapper event context might undo something like impersonation. * * This must be called after a successful tevent_context_push_use(). * Any ordering violation results in calling * the abort handler of the given tevent context. * * This basically calls the wrapper's after_use() hook. * * @param[in] ev The event context to work on. * * @note This is only needed if wrapper event contexts are in use. * * @see tevent_context_push_use * * @note Available as of tevent 0.9.37 * @note Deprecated as of tevent 0.9.38 */ void tevent_context_pop_use(struct tevent_context *ev); #else void _tevent_context_pop_use(struct tevent_context *ev, const char *location) _DEPRECATED_; #define tevent_context_pop_use(ev) \ _tevent_context_pop_use(ev, __location__) #endif /** * @brief Check if the two context pointers belong to the same low level loop * * With the introduction of wrapper contexts it's not trivial * to check if two context pointers belong to the same low level * event loop. Some code may need to know this in order * to make some caching decisions. * * @param[in] ev1 The first event context. * @param[in] ev2 The second event context.
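 *
 * A short, hypothetical sketch of such a caching decision (conn and
 * conn->cached_ev are made-up application names, not tevent API):
 *
 * @code
 * if (!tevent_context_same_loop(conn->cached_ev, caller_ev)) {
 *	// different low level loops, do not reuse the cached state
 *	return false;
 * }
 * @endcode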
* * @return true if both contexts belong to the same (still existing) context * loop, false otherwise. * * @see tevent_context_wrapper_create * * @note Available as of tevent 0.9.37 * @note Deprecated as of tevent 0.9.38 */ bool tevent_context_same_loop(struct tevent_context *ev1, struct tevent_context *ev2) _DEPRECATED_; /* @} */ #endif /* TEVENT_DEPRECATED */ /** * @defgroup tevent_compat The tevent compatibility functions * @ingroup tevent * * The following definitions are useful only for compatibility with the * implementation originally developed within the samba4 code and will * soon be removed. Please NEVER use in new code. * * @todo Ignore it? * * @{ */ #ifdef TEVENT_COMPAT_DEFINES #define event_context tevent_context #define event_ops tevent_ops #define fd_event tevent_fd #define timed_event tevent_timer #define signal_event tevent_signal #define event_fd_handler_t tevent_fd_handler_t #define event_timed_handler_t tevent_timer_handler_t #define event_signal_handler_t tevent_signal_handler_t #define event_context_init(mem_ctx) \ tevent_context_init(mem_ctx) #define event_context_init_byname(mem_ctx, name) \ tevent_context_init_byname(mem_ctx, name) #define event_backend_list(mem_ctx) \ tevent_backend_list(mem_ctx) #define event_set_default_backend(backend) \ tevent_set_default_backend(backend) #define event_add_fd(ev, mem_ctx, fd, flags, handler, private_data) \ tevent_add_fd(ev, mem_ctx, fd, flags, handler, private_data) #define event_add_timed(ev, mem_ctx, next_event, handler, private_data) \ tevent_add_timer(ev, mem_ctx, next_event, handler, private_data) #define event_add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data) \ tevent_add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data) #define event_loop_once(ev) \ tevent_loop_once(ev) #define event_loop_wait(ev) \ tevent_loop_wait(ev) #define event_get_fd_flags(fde) \ tevent_fd_get_flags(fde) #define event_set_fd_flags(fde, flags) \ tevent_fd_set_flags(fde, flags) #define EVENT_FD_READ TEVENT_FD_READ #define EVENT_FD_WRITE TEVENT_FD_WRITE #define EVENT_FD_WRITEABLE(fde) \ TEVENT_FD_WRITEABLE(fde) #define EVENT_FD_READABLE(fde) \ TEVENT_FD_READABLE(fde) #define EVENT_FD_NOT_WRITEABLE(fde) \ TEVENT_FD_NOT_WRITEABLE(fde) #define EVENT_FD_NOT_READABLE(fde) \ TEVENT_FD_NOT_READABLE(fde) #define ev_debug_level tevent_debug_level #define EV_DEBUG_FATAL TEVENT_DEBUG_FATAL #define EV_DEBUG_ERROR TEVENT_DEBUG_ERROR #define EV_DEBUG_WARNING TEVENT_DEBUG_WARNING #define EV_DEBUG_TRACE TEVENT_DEBUG_TRACE #define ev_set_debug(ev, debug, context) \ tevent_set_debug(ev, debug, context) #define ev_set_debug_stderr(_ev) tevent_set_debug_stderr(_ev) #endif /* TEVENT_COMPAT_DEFINES */ /* @} */ #endif /* __TEVENT_H__ */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/tevent.pc.in0000660000000000000000000000040700000000000015272 0ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: tevent Description: An event system library Version: @PACKAGE_VERSION@ Requires: talloc Libs: @LIB_RPATH@ -L${libdir} -ltevent Cflags: -I${includedir} URL: http://samba.org/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/tevent.py0000660000000000000000000000174400000000000014720 0ustar00rootroot00000000000000# # Python integration for tevent # # Copyright (C) Jelmer Vernooij 2011 # # ** NOTE!
The following LGPL license applies to the tevent # ** library. This does NOT imply that all of Samba is released # ** under the LGPL # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 3 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see . from _tevent import ( __version__, backend_list, Context, Signal, ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_debug.c0000660000000000000000000001603300000000000015655 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Copyright (C) Andrew Tridgell 2005 Copyright (C) Jelmer Vernooij 2005 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "tevent.h" #include "tevent_internal.h" /******************************************************************** * Debug wrapper functions, modeled (with lot's of code copied as is) * after the ev debug wrapper functions ********************************************************************/ /* this allows the user to choose their own debug function */ int tevent_set_debug(struct tevent_context *ev, void (*debug)(void *context, enum tevent_debug_level level, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0), void *context) { if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); tevent_abort(ev, "tevent_set_debug() on wrapper"); errno = EINVAL; return -1; } ev->debug_ops.debug = debug; ev->debug_ops.context = context; return 0; } /* debug function for ev_set_debug_stderr */ static void tevent_debug_stderr(void *private_data, enum tevent_debug_level level, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0); static void tevent_debug_stderr(void *private_data, enum tevent_debug_level level, const char *fmt, va_list ap) { if (level <= TEVENT_DEBUG_WARNING) { vfprintf(stderr, fmt, ap); } } /* convenience function to setup debug messages on stderr messages of level TEVENT_DEBUG_WARNING and higher are printed */ int tevent_set_debug_stderr(struct tevent_context *ev) { return tevent_set_debug(ev, tevent_debug_stderr, ev); } /* * log a message * * The default debug action is to ignore debugging messages. * This is the most appropriate action for a library. 
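 *
 * For example, an application could route everything through its own
 * logging with a callback along these lines (my_debug and the syslog
 * mapping are only an illustration, not part of tevent; tevent only
 * requires the signature accepted by tevent_set_debug()):
 *
 *   static void my_debug(void *context, enum tevent_debug_level level,
 *                        const char *fmt, va_list ap)
 *   {
 *           if (level <= TEVENT_DEBUG_WARNING) {
 *                   vsyslog(LOG_WARNING, fmt, ap);
 *           }
 *   }
 *
 *   ...
 *   tevent_set_debug(ev, my_debug, NULL);
 *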
* Applications using the library must decide where to * redirect debugging messages */ void tevent_debug(struct tevent_context *ev, enum tevent_debug_level level, const char *fmt, ...) { va_list ap; if (!ev) { return; } if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); } if (ev->debug_ops.debug == NULL) { return; } va_start(ap, fmt); ev->debug_ops.debug(ev->debug_ops.context, level, fmt, ap); va_end(ap); } void tevent_set_trace_callback(struct tevent_context *ev, tevent_trace_callback_t cb, void *private_data) { if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); tevent_abort(ev, "tevent_set_trace_callback() on wrapper"); return; } ev->tracing.point.callback = cb; ev->tracing.point.private_data = private_data; } void tevent_get_trace_callback(struct tevent_context *ev, tevent_trace_callback_t *cb, void *private_data) { *cb = ev->tracing.point.callback; *(void**)private_data = ev->tracing.point.private_data; } void tevent_trace_point_callback(struct tevent_context *ev, enum tevent_trace_point tp) { if (ev->tracing.point.callback != NULL) { ev->tracing.point.callback(tp, ev->tracing.point.private_data); } } void tevent_set_trace_fd_callback(struct tevent_context *ev, tevent_trace_fd_callback_t cb, void *private_data) { if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); tevent_abort(ev, "tevent_set_trace_fd_callback() on wrapper"); return; } ev->tracing.fde.callback = cb; ev->tracing.fde.private_data = private_data; } void tevent_get_trace_fd_callback(struct tevent_context *ev, tevent_trace_fd_callback_t *cb, void *p_private_data) { *cb = ev->tracing.fde.callback; *(void**)p_private_data = ev->tracing.fde.private_data; } void tevent_trace_fd_callback(struct tevent_context *ev, struct tevent_fd *fde, enum tevent_event_trace_point tp) { if (ev->tracing.fde.callback != NULL) { ev->tracing.fde.callback(fde, tp, ev->tracing.fde.private_data); } } void tevent_set_trace_signal_callback(struct tevent_context *ev, tevent_trace_signal_callback_t cb, void *private_data) { if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); tevent_abort(ev, "tevent_set_trace_signal_callback() " "on wrapper"); return; } ev->tracing.se.callback = cb; ev->tracing.se.private_data = private_data; } void tevent_get_trace_signal_callback(struct tevent_context *ev, tevent_trace_signal_callback_t *cb, void *p_private_data) { *cb = ev->tracing.se.callback; *(void**)p_private_data = ev->tracing.se.private_data; } void tevent_trace_signal_callback(struct tevent_context *ev, struct tevent_signal *se, enum tevent_event_trace_point tp) { if (ev->tracing.se.callback != NULL) { ev->tracing.se.callback(se, tp, ev->tracing.se.private_data); } } void tevent_set_trace_timer_callback(struct tevent_context *ev, tevent_trace_timer_callback_t cb, void *private_data) { if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); tevent_abort(ev, "tevent_set_trace_timer_callback() " "on wrapper"); return; } ev->tracing.te.callback = cb; ev->tracing.te.private_data = private_data; } void tevent_get_trace_timer_callback(struct tevent_context *ev, tevent_trace_timer_callback_t *cb, void *p_private_data) { *cb = ev->tracing.te.callback; *(void**)p_private_data = ev->tracing.te.private_data; } void tevent_trace_timer_callback(struct tevent_context *ev, struct tevent_timer *te, enum tevent_event_trace_point tp) { if (ev->tracing.te.callback != NULL) { ev->tracing.te.callback(te, tp, ev->tracing.te.private_data); } } void tevent_set_trace_immediate_callback(struct tevent_context *ev, 
tevent_trace_immediate_callback_t cb, void *private_data) { if (ev->wrapper.glue != NULL) { ev = tevent_wrapper_main_ev(ev); tevent_abort(ev, "tevent_set_trace_immediate_callback() " "on wrapper"); return; } ev->tracing.im.callback = cb; ev->tracing.im.private_data = private_data; } void tevent_get_trace_immediate_callback(struct tevent_context *ev, tevent_trace_immediate_callback_t *cb, void *p_private_data) { *cb = ev->tracing.im.callback; *(void**)p_private_data = ev->tracing.im.private_data; } void tevent_trace_immediate_callback(struct tevent_context *ev, struct tevent_immediate *im, enum tevent_event_trace_point tp) { if (ev->tracing.im.callback != NULL) { ev->tracing.im.callback(im, tp, ev->tracing.im.private_data); } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0212057 tevent-0.11.0/tevent_epoll.c0000660000000000000000000006216300000000000015707 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. main select loop and event handling - epoll implementation Copyright (C) Andrew Tridgell 2003-2005 Copyright (C) Stefan Metzmacher 2005-2013 Copyright (C) Jeremy Allison 2013 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/select.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" struct epoll_event_context { /* a pointer back to the generic event_context */ struct tevent_context *ev; /* when using epoll this is the handle from epoll_create */ int epoll_fd; pid_t pid; bool panic_force_replay; bool *panic_state; bool (*panic_fallback)(struct tevent_context *ev, bool replay); }; #define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT (1<<0) #define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1) #define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2) #define EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX (1<<3) #ifdef TEST_PANIC_FALLBACK static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev, int size) { if (epoll_ev->panic_fallback == NULL) { return epoll_create(size); } /* 50% of the time, fail... */ if ((random() % 2) == 0) { errno = EINVAL; return -1; } return epoll_create(size); } static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev, int epfd, int op, int fd, struct epoll_event *event) { if (epoll_ev->panic_fallback == NULL) { return epoll_ctl(epfd, op, fd, event); } /* 50% of the time, fail... */ if ((random() % 2) == 0) { errno = EINVAL; return -1; } return epoll_ctl(epfd, op, fd, event); } static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev, int epfd, struct epoll_event *events, int maxevents, int timeout) { if (epoll_ev->panic_fallback == NULL) { return epoll_wait(epfd, events, maxevents, timeout); } /* 50% of the time, fail... 
*/ if ((random() % 2) == 0) { errno = EINVAL; return -1; } return epoll_wait(epfd, events, maxevents, timeout); } #define epoll_create(_size) \ epoll_create_panic_fallback(epoll_ev, _size) #define epoll_ctl(_epfd, _op, _fd, _event) \ epoll_ctl_panic_fallback(epoll_ev,_epfd, _op, _fd, _event) #define epoll_wait(_epfd, _events, _maxevents, _timeout) \ epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout) #endif /* called to set the panic fallback function. */ _PRIVATE_ void tevent_epoll_set_panic_fallback(struct tevent_context *ev, bool (*panic_fallback)(struct tevent_context *ev, bool replay)) { struct epoll_event_context *epoll_ev = talloc_get_type_abort(ev->additional_data, struct epoll_event_context); epoll_ev->panic_fallback = panic_fallback; } /* called when a epoll call fails */ static void epoll_panic(struct epoll_event_context *epoll_ev, const char *reason, bool replay) { struct tevent_context *ev = epoll_ev->ev; bool (*panic_fallback)(struct tevent_context *ev, bool replay); panic_fallback = epoll_ev->panic_fallback; if (epoll_ev->panic_state != NULL) { *epoll_ev->panic_state = true; } if (epoll_ev->panic_force_replay) { replay = true; } TALLOC_FREE(ev->additional_data); if (panic_fallback == NULL) { tevent_debug(ev, TEVENT_DEBUG_FATAL, "%s (%s) replay[%u] - calling abort()\n", reason, strerror(errno), (unsigned)replay); abort(); } tevent_debug(ev, TEVENT_DEBUG_ERROR, "%s (%s) replay[%u] - calling panic_fallback\n", reason, strerror(errno), (unsigned)replay); if (!panic_fallback(ev, replay)) { /* Fallback failed. */ tevent_debug(ev, TEVENT_DEBUG_FATAL, "%s (%s) replay[%u] - calling abort()\n", reason, strerror(errno), (unsigned)replay); abort(); } } /* map from TEVENT_FD_* to EPOLLIN/EPOLLOUT */ static uint32_t epoll_map_flags(uint16_t flags) { uint32_t ret = 0; if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP); if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP); return ret; } /* free the epoll fd */ static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev) { close(epoll_ev->epoll_fd); epoll_ev->epoll_fd = -1; return 0; } /* init the epoll fd */ static int epoll_init_ctx(struct epoll_event_context *epoll_ev) { epoll_ev->epoll_fd = epoll_create(64); if (epoll_ev->epoll_fd == -1) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL, "Failed to create epoll handle.\n"); return -1; } if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING, "Failed to set close-on-exec, file descriptor may be leaked to children.\n"); } epoll_ev->pid = getpid(); talloc_set_destructor(epoll_ev, epoll_ctx_destructor); return 0; } static void epoll_update_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde); /* reopen the epoll handle when our pid changes see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an demonstration of why this is needed */ static void epoll_check_reopen(struct epoll_event_context *epoll_ev) { struct tevent_fd *fde; bool *caller_panic_state = epoll_ev->panic_state; bool panic_triggered = false; if (epoll_ev->pid == getpid()) { return; } close(epoll_ev->epoll_fd); epoll_ev->epoll_fd = epoll_create(64); if (epoll_ev->epoll_fd == -1) { epoll_panic(epoll_ev, "epoll_create() failed", false); return; } if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING, "Failed to set close-on-exec, file descriptor may be leaked to children.\n"); } epoll_ev->pid = getpid(); epoll_ev->panic_state = &panic_triggered; for 
(fde=epoll_ev->ev->fd_events;fde;fde=fde->next) { fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; epoll_update_event(epoll_ev, fde); if (panic_triggered) { if (caller_panic_state != NULL) { *caller_panic_state = true; } return; } } epoll_ev->panic_state = NULL; } /* epoll cannot add the same file descriptor twice, once with read, once with write which is allowed by the tevent backend. Multiplex the existing fde, flag it as such so we can search for the correct fde on event triggering. */ static int epoll_add_multiplex_fd(struct epoll_event_context *epoll_ev, struct tevent_fd *add_fde) { struct epoll_event event; struct tevent_fd *mpx_fde; int ret; /* Find the existing fde that caused the EEXIST error. */ for (mpx_fde = epoll_ev->ev->fd_events; mpx_fde; mpx_fde = mpx_fde->next) { if (mpx_fde->fd != add_fde->fd) { continue; } if (mpx_fde == add_fde) { continue; } break; } if (mpx_fde == NULL) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL, "can't find multiplex fde for fd[%d]", add_fde->fd); return -1; } if (mpx_fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { /* Logic error. Can't have more than 2 multiplexed fde's. */ tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL, "multiplex fde for fd[%d] is already multiplexed\n", mpx_fde->fd); return -1; } /* * The multiplex fde must have the same fd, and also * already have an epoll event attached. */ if (!(mpx_fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) { /* Logic error. Can't have more than 2 multiplexed fde's. */ tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL, "multiplex fde for fd[%d] has no event\n", mpx_fde->fd); return -1; } /* Modify the mpx_fde to add in the new flags. */ ZERO_STRUCT(event); event.events = epoll_map_flags(mpx_fde->flags); event.events |= epoll_map_flags(add_fde->flags); event.data.ptr = mpx_fde; ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, mpx_fde->fd, &event); if (ret != 0 && errno == EBADF) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_ERROR, "EPOLL_CTL_MOD EBADF for " "add_fde[%p] mpx_fde[%p] fd[%d] - disabling\n", add_fde, mpx_fde, add_fde->fd); DLIST_REMOVE(epoll_ev->ev->fd_events, mpx_fde); mpx_fde->wrapper = NULL; mpx_fde->event_ctx = NULL; DLIST_REMOVE(epoll_ev->ev->fd_events, add_fde); add_fde->wrapper = NULL; add_fde->event_ctx = NULL; return 0; } else if (ret != 0) { return ret; } /* * Make each fde->additional_data pointers point at each other * so we can look them up from each other. They are now paired. */ mpx_fde->additional_data = (struct tevent_fd *)add_fde; add_fde->additional_data = (struct tevent_fd *)mpx_fde; /* Now flag both fde's as being multiplexed. */ mpx_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX; add_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX; /* we need to keep the GOT_ERROR flag */ if (mpx_fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR) { add_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR; } return 0; } /* add the epoll event to the given fd_event */ static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde) { struct epoll_event event; int ret; struct tevent_fd *mpx_fde = NULL; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * This is a multiplexed fde, we need to include both * flags in the modified event. 
*/ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } ZERO_STRUCT(event); event.events = epoll_map_flags(fde->flags); if (mpx_fde != NULL) { event.events |= epoll_map_flags(mpx_fde->flags); } event.data.ptr = fde; ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event); if (ret != 0 && errno == EBADF) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_ERROR, "EPOLL_CTL_ADD EBADF for " "fde[%p] mpx_fde[%p] fd[%d] - disabling\n", fde, mpx_fde, fde->fd); DLIST_REMOVE(epoll_ev->ev->fd_events, fde); fde->wrapper = NULL; fde->event_ctx = NULL; if (mpx_fde != NULL) { DLIST_REMOVE(epoll_ev->ev->fd_events, mpx_fde); mpx_fde->wrapper = NULL; mpx_fde->event_ctx = NULL; } return; } else if (ret != 0 && errno == EEXIST && mpx_fde == NULL) { ret = epoll_add_multiplex_fd(epoll_ev, fde); if (ret != 0) { epoll_panic(epoll_ev, "epoll_add_multiplex_fd failed", false); return; } } else if (ret != 0) { epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed", false); return; } fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; /* only if we want to read we want to tell the event handler about errors */ if (fde->flags & TEVENT_FD_READ) { fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } if (mpx_fde == NULL) { return; } mpx_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; /* only if we want to read we want to tell the event handler about errors */ if (mpx_fde->flags & TEVENT_FD_READ) { mpx_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } } /* delete the epoll event for given fd_event */ static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde) { struct epoll_event event; int ret; struct tevent_fd *mpx_fde = NULL; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * This is a multiplexed fde, we need to modify both events. */ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } ZERO_STRUCT(event); ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event); if (ret != 0 && errno == ENOENT) { /* * This can happen after a epoll_check_reopen * within epoll_event_fd_destructor. 
*/ tevent_debug(epoll_ev->ev, TEVENT_DEBUG_TRACE, "EPOLL_CTL_DEL ignoring ENOENT for fd[%d]\n", fde->fd); return; } else if (ret != 0 && errno == EBADF) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING, "EPOLL_CTL_DEL EBADF for " "fde[%p] mpx_fde[%p] fd[%d] - disabling\n", fde, mpx_fde, fde->fd); DLIST_REMOVE(epoll_ev->ev->fd_events, fde); fde->wrapper = NULL; fde->event_ctx = NULL; if (mpx_fde != NULL) { DLIST_REMOVE(epoll_ev->ev->fd_events, mpx_fde); mpx_fde->wrapper = NULL; mpx_fde->event_ctx = NULL; } return; } else if (ret != 0) { epoll_panic(epoll_ev, "EPOLL_CTL_DEL failed", false); return; } } /* change the epoll event to the given fd_event */ static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde) { struct tevent_fd *mpx_fde = NULL; struct epoll_event event; int ret; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * This is a multiplexed fde, we need to include both * flags in the modified event. */ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } ZERO_STRUCT(event); event.events = epoll_map_flags(fde->flags); if (mpx_fde != NULL) { event.events |= epoll_map_flags(mpx_fde->flags); } event.data.ptr = fde; ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event); if (ret != 0 && errno == EBADF) { tevent_debug(epoll_ev->ev, TEVENT_DEBUG_ERROR, "EPOLL_CTL_MOD EBADF for " "fde[%p] mpx_fde[%p] fd[%d] - disabling\n", fde, mpx_fde, fde->fd); DLIST_REMOVE(epoll_ev->ev->fd_events, fde); fde->wrapper = NULL; fde->event_ctx = NULL; if (mpx_fde != NULL) { DLIST_REMOVE(epoll_ev->ev->fd_events, mpx_fde); mpx_fde->wrapper = NULL; mpx_fde->event_ctx = NULL; } return; } else if (ret != 0) { epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed", false); return; } fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; /* only if we want to read we want to tell the event handler about errors */ if (fde->flags & TEVENT_FD_READ) { fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } if (mpx_fde == NULL) { return; } mpx_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; /* only if we want to read we want to tell the event handler about errors */ if (mpx_fde->flags & TEVENT_FD_READ) { mpx_fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR; } } static void epoll_update_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde) { bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR); bool want_read = (fde->flags & TEVENT_FD_READ); bool want_write= (fde->flags & TEVENT_FD_WRITE); struct tevent_fd *mpx_fde = NULL; if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * work out what the multiplexed fde wants. */ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); if (mpx_fde->flags & TEVENT_FD_READ) { want_read = true; } if (mpx_fde->flags & TEVENT_FD_WRITE) { want_write = true; } } /* there's already an event */ if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) { if (want_read || (want_write && !got_error)) { epoll_mod_event(epoll_ev, fde); return; } /* * if we want to match the select behavior, we need to remove the epoll_event * when the caller isn't interested in events. 
* * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them */ epoll_del_event(epoll_ev, fde); return; } /* there's no epoll_event attached to the fde */ if (want_read || (want_write && !got_error)) { epoll_add_event(epoll_ev, fde); return; } } /* Cope with epoll returning EPOLLHUP|EPOLLERR on an event. Return true if there's nothing else to do, false if this event needs further handling. */ static bool epoll_handle_hup_or_err(struct epoll_event_context *epoll_ev, struct tevent_fd *fde) { if (fde == NULL) { /* Nothing to do if no event. */ return true; } fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR; /* * if we only wait for TEVENT_FD_WRITE, we should not tell the * event handler about it, and remove the epoll_event, * as we only report errors when waiting for read events, * to match the select() behavior */ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) { /* * Do the same as the poll backend and * remove the writeable flag. */ fde->flags &= ~TEVENT_FD_WRITE; return true; } /* This has TEVENT_FD_READ set, we're not finished. */ return false; } /* event loop handling using epoll */ static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp) { int ret, i; #define MAXEVENTS 1 struct epoll_event events[MAXEVENTS]; int timeout = -1; int wait_errno; if (tvalp) { /* it's better to trigger timed events a bit later than too early */ timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000); } if (epoll_ev->ev->signal_events && tevent_common_check_signal(epoll_ev->ev)) { return 0; } tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_BEFORE_WAIT); ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout); wait_errno = errno; tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_AFTER_WAIT); if (ret == -1 && wait_errno == EINTR && epoll_ev->ev->signal_events) { if (tevent_common_check_signal(epoll_ev->ev)) { return 0; } } if (ret == -1 && wait_errno != EINTR) { epoll_panic(epoll_ev, "epoll_wait() failed", true); return -1; } if (ret == 0 && tvalp) { /* we don't care about a possible delay here */ tevent_common_loop_timer_delay(epoll_ev->ev); return 0; } for (i=0;iadditional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * Save off the multiplexed event in case we need * to use it to call the handler function. */ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); } if (events[i].events & (EPOLLHUP|EPOLLERR)) { bool handled_fde = epoll_handle_hup_or_err(epoll_ev, fde); bool handled_mpx = epoll_handle_hup_or_err(epoll_ev, mpx_fde); if (handled_fde && handled_mpx) { epoll_update_event(epoll_ev, fde); continue; } if (!handled_mpx) { /* * If the mpx event was the one that needs * further handling, it's the TEVENT_FD_READ * event so switch over and call that handler. */ fde = mpx_fde; mpx_fde = NULL; } flags |= TEVENT_FD_READ; } if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ; if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE; if (flags & TEVENT_FD_WRITE) { if (fde->flags & TEVENT_FD_WRITE) { mpx_fde = NULL; } if (mpx_fde && mpx_fde->flags & TEVENT_FD_WRITE) { fde = mpx_fde; mpx_fde = NULL; } } if (mpx_fde) { /* Ensure we got the right fde. */ if ((flags & fde->flags) == 0) { fde = mpx_fde; mpx_fde = NULL; } } /* * make sure we only pass the flags * the handler is expecting. */ flags &= fde->flags; if (flags) { return tevent_common_invoke_fd_handler(fde, flags, NULL); } } return 0; } /* create a epoll_event_context structure. 
*/ static int epoll_event_context_init(struct tevent_context *ev) { int ret; struct epoll_event_context *epoll_ev; /* * We might be called during tevent_re_initialise() * which means we need to free our old additional_data. */ TALLOC_FREE(ev->additional_data); epoll_ev = talloc_zero(ev, struct epoll_event_context); if (!epoll_ev) return -1; epoll_ev->ev = ev; epoll_ev->epoll_fd = -1; ret = epoll_init_ctx(epoll_ev); if (ret != 0) { talloc_free(epoll_ev); return ret; } ev->additional_data = epoll_ev; return 0; } /* destroy an fd_event */ static int epoll_event_fd_destructor(struct tevent_fd *fde) { struct tevent_context *ev = fde->event_ctx; struct epoll_event_context *epoll_ev = NULL; bool panic_triggered = false; struct tevent_fd *mpx_fde = NULL; int flags = fde->flags; if (ev == NULL) { return tevent_common_fd_destructor(fde); } epoll_ev = talloc_get_type_abort(ev->additional_data, struct epoll_event_context); /* * we must remove the event from the list * otherwise a panic fallback handler may * reuse invalid memory */ DLIST_REMOVE(ev->fd_events, fde); if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX) { mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX; mpx_fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX; fde->additional_data = NULL; mpx_fde->additional_data = NULL; fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT; } epoll_ev->panic_state = &panic_triggered; epoll_check_reopen(epoll_ev); if (panic_triggered) { return tevent_common_fd_destructor(fde); } if (mpx_fde != NULL) { epoll_update_event(epoll_ev, mpx_fde); if (panic_triggered) { return tevent_common_fd_destructor(fde); } } fde->flags = 0; epoll_update_event(epoll_ev, fde); fde->flags = flags; if (panic_triggered) { return tevent_common_fd_destructor(fde); } epoll_ev->panic_state = NULL; return tevent_common_fd_destructor(fde); } /* add a fd based event return NULL on failure (memory allocation error) */ static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct epoll_event_context *epoll_ev = talloc_get_type_abort(ev->additional_data, struct epoll_event_context); struct tevent_fd *fde; bool panic_triggered = false; fde = tevent_common_add_fd(ev, mem_ctx, fd, flags, handler, private_data, handler_name, location); if (!fde) return NULL; talloc_set_destructor(fde, epoll_event_fd_destructor); epoll_ev->panic_state = &panic_triggered; epoll_check_reopen(epoll_ev); if (panic_triggered) { return fde; } epoll_ev->panic_state = NULL; epoll_update_event(epoll_ev, fde); return fde; } /* set the fd event flags */ static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags) { struct tevent_context *ev; struct epoll_event_context *epoll_ev; bool panic_triggered = false; if (fde->flags == flags) return; ev = fde->event_ctx; epoll_ev = talloc_get_type_abort(ev->additional_data, struct epoll_event_context); fde->flags = flags; epoll_ev->panic_state = &panic_triggered; epoll_check_reopen(epoll_ev); if (panic_triggered) { return; } epoll_ev->panic_state = NULL; epoll_update_event(epoll_ev, fde); } /* do a single event loop using the events defined in ev */ static int epoll_event_loop_once(struct tevent_context *ev, const char *location) { struct epoll_event_context *epoll_ev = talloc_get_type_abort(ev->additional_data, struct epoll_event_context); struct 
timeval tval; bool panic_triggered = false; if (ev->signal_events && tevent_common_check_signal(ev)) { return 0; } if (ev->threaded_contexts != NULL) { tevent_common_threaded_activate_immediate(ev); } if (ev->immediate_events && tevent_common_loop_immediate(ev)) { return 0; } tval = tevent_common_loop_timer_delay(ev); if (tevent_timeval_is_zero(&tval)) { return 0; } epoll_ev->panic_state = &panic_triggered; epoll_ev->panic_force_replay = true; epoll_check_reopen(epoll_ev); if (panic_triggered) { errno = EINVAL; return -1; } epoll_ev->panic_force_replay = false; epoll_ev->panic_state = NULL; return epoll_event_loop(epoll_ev, &tval); } static const struct tevent_ops epoll_event_ops = { .context_init = epoll_event_context_init, .add_fd = epoll_event_add_fd, .set_fd_close_fn = tevent_common_fd_set_close_fn, .get_fd_flags = tevent_common_fd_get_flags, .set_fd_flags = epoll_event_set_fd_flags, .add_timer = tevent_common_add_timer_v2, .schedule_immediate = tevent_common_schedule_immediate, .add_signal = tevent_common_add_signal, .loop_once = epoll_event_loop_once, .loop_wait = tevent_common_loop_wait, }; _PRIVATE_ bool tevent_epoll_init(void) { return tevent_register_backend("epoll", &epoll_event_ops); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_fd.c0000660000000000000000000001034500000000000015160 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. common events code for fd events Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" int tevent_common_fd_destructor(struct tevent_fd *fde) { if (fde->destroyed) { tevent_common_check_double_free(fde, "tevent_fd double free"); goto done; } fde->destroyed = true; if (fde->event_ctx) { tevent_trace_fd_callback(fde->event_ctx, fde, TEVENT_EVENT_TRACE_DETACH); DLIST_REMOVE(fde->event_ctx->fd_events, fde); } if (fde->close_fn) { fde->close_fn(fde->event_ctx, fde, fde->fd, fde->private_data); fde->fd = -1; fde->close_fn = NULL; } fde->event_ctx = NULL; done: if (fde->busy) { return -1; } fde->wrapper = NULL; return 0; } struct tevent_fd *tevent_common_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct tevent_fd *fde; /* tevent will crash later on select() if we save * a negative file descriptor. 
Better to fail here * so that consumers will be able to debug it */ if (fd < 0) return NULL; fde = talloc(mem_ctx?mem_ctx:ev, struct tevent_fd); if (!fde) return NULL; *fde = (struct tevent_fd) { .event_ctx = ev, .fd = fd, .flags = flags, .handler = handler, .private_data = private_data, .handler_name = handler_name, .location = location, }; tevent_trace_fd_callback(fde->event_ctx, fde, TEVENT_EVENT_TRACE_ATTACH); DLIST_ADD(ev->fd_events, fde); talloc_set_destructor(fde, tevent_common_fd_destructor); return fde; } uint16_t tevent_common_fd_get_flags(struct tevent_fd *fde) { return fde->flags; } void tevent_common_fd_set_flags(struct tevent_fd *fde, uint16_t flags) { if (fde->flags == flags) return; fde->flags = flags; } void tevent_common_fd_set_close_fn(struct tevent_fd *fde, tevent_fd_close_fn_t close_fn) { fde->close_fn = close_fn; } int tevent_common_invoke_fd_handler(struct tevent_fd *fde, uint16_t flags, bool *removed) { struct tevent_context *handler_ev = fde->event_ctx; if (removed != NULL) { *removed = false; } if (fde->event_ctx == NULL) { return 0; } fde->busy = true; if (fde->wrapper != NULL) { handler_ev = fde->wrapper->wrap_ev; tevent_wrapper_push_use_internal(handler_ev, fde->wrapper); fde->wrapper->ops->before_fd_handler( fde->wrapper->wrap_ev, fde->wrapper->private_state, fde->wrapper->main_ev, fde, flags, fde->handler_name, fde->location); } tevent_trace_fd_callback(fde->event_ctx, fde, TEVENT_EVENT_TRACE_BEFORE_HANDLER); fde->handler(handler_ev, fde, flags, fde->private_data); if (fde->wrapper != NULL) { fde->wrapper->ops->after_fd_handler( fde->wrapper->wrap_ev, fde->wrapper->private_state, fde->wrapper->main_ev, fde, flags, fde->handler_name, fde->location); tevent_wrapper_pop_use_internal(handler_ev, fde->wrapper); } fde->busy = false; if (fde->destroyed) { talloc_set_destructor(fde, NULL); TALLOC_FREE(fde); if (removed != NULL) { *removed = true; } } return 0; } void tevent_fd_set_tag(struct tevent_fd *fde, uint64_t tag) { if (fde == NULL) { return; } fde->tag = tag; } uint64_t tevent_fd_get_tag(const struct tevent_fd *fde) { if (fde == NULL) { return 0; } return fde->tag; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_immediate.c0000660000000000000000000001406700000000000016532 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. common events code for immediate events Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" static void tevent_common_immediate_cancel(struct tevent_immediate *im) { const char *create_location = im->create_location; bool busy = im->busy; uint64_t tag = im->tag; struct tevent_context *detach_ev_ctx = NULL; if (im->destroyed) { tevent_abort(im->event_ctx, "tevent_immediate use after free"); return; } if (im->detach_ev_ctx != NULL) { detach_ev_ctx = im->detach_ev_ctx; im->detach_ev_ctx = NULL; tevent_trace_immediate_callback(detach_ev_ctx, im, TEVENT_EVENT_TRACE_DETACH); return; } if (!im->event_ctx) { return; } if (im->handler_name != NULL) { tevent_debug(im->event_ctx, TEVENT_DEBUG_TRACE, "Cancel immediate event %p \"%s\"\n", im, im->handler_name); } /* let the backend free im->additional_data */ if (im->cancel_fn) { im->cancel_fn(im); } if (busy && im->handler_name == NULL) { detach_ev_ctx = im->event_ctx; } else { tevent_trace_immediate_callback(im->event_ctx, im, TEVENT_EVENT_TRACE_DETACH); } DLIST_REMOVE(im->event_ctx->immediate_events, im); *im = (struct tevent_immediate) { .create_location = create_location, .busy = busy, .tag = tag, .detach_ev_ctx = detach_ev_ctx, }; if (!busy) { talloc_set_destructor(im, NULL); } } /* destroy an immediate event */ static int tevent_common_immediate_destructor(struct tevent_immediate *im) { if (im->destroyed) { tevent_common_check_double_free(im, "tevent_immediate double free"); goto done; } tevent_common_immediate_cancel(im); im->destroyed = true; done: if (im->busy) { return -1; } return 0; } /* * schedule an immediate event on */ void tevent_common_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location) { const char *create_location = im->create_location; bool busy = im->busy; uint64_t tag = im->tag; struct tevent_wrapper_glue *glue = im->wrapper; tevent_common_immediate_cancel(im); if (!handler) { return; } *im = (struct tevent_immediate) { .event_ctx = ev, .wrapper = glue, .handler = handler, .private_data = private_data, .handler_name = handler_name, .create_location = create_location, .schedule_location = location, .busy = busy, .tag = tag, }; tevent_trace_immediate_callback(im->event_ctx, im, TEVENT_EVENT_TRACE_ATTACH); DLIST_ADD_END(ev->immediate_events, im); talloc_set_destructor(im, tevent_common_immediate_destructor); tevent_debug(ev, TEVENT_DEBUG_TRACE, "Schedule immediate event \"%s\": %p\n", handler_name, im); } int tevent_common_invoke_immediate_handler(struct tevent_immediate *im, bool *removed) { struct tevent_context *handler_ev = im->event_ctx; struct tevent_context *ev = im->event_ctx; struct tevent_immediate cur = *im; if (removed != NULL) { *removed = false; } tevent_debug(ev, TEVENT_DEBUG_TRACE, "Run immediate event \"%s\": %p\n", im->handler_name, im); /* * remember the handler and then clear the event * the handler might reschedule the event */ im->busy = true; im->handler_name = NULL; tevent_common_immediate_cancel(im); if (cur.wrapper != NULL) { handler_ev = cur.wrapper->wrap_ev; tevent_wrapper_push_use_internal(handler_ev, cur.wrapper); cur.wrapper->ops->before_immediate_handler( cur.wrapper->wrap_ev, cur.wrapper->private_state, cur.wrapper->main_ev, im, cur.handler_name, cur.schedule_location); } tevent_trace_immediate_callback(cur.event_ctx, im, TEVENT_EVENT_TRACE_BEFORE_HANDLER); cur.handler(handler_ev, im, cur.private_data); if (cur.wrapper != NULL) { 
cur.wrapper->ops->after_immediate_handler( cur.wrapper->wrap_ev, cur.wrapper->private_state, cur.wrapper->main_ev, im, cur.handler_name, cur.schedule_location); tevent_wrapper_pop_use_internal(handler_ev, cur.wrapper); } im->busy = false; /* The event was removed in tevent_common_immediate_cancel(). */ if (im->detach_ev_ctx != NULL) { struct tevent_context *detach_ev_ctx = im->detach_ev_ctx; im->detach_ev_ctx = NULL; tevent_trace_immediate_callback(detach_ev_ctx, im, TEVENT_EVENT_TRACE_DETACH); } if (im->destroyed) { talloc_set_destructor(im, NULL); TALLOC_FREE(im); if (removed != NULL) { *removed = true; } } return 0; } /* trigger the first immediate event and return true if no event was triggered return false */ bool tevent_common_loop_immediate(struct tevent_context *ev) { struct tevent_immediate *im = ev->immediate_events; int ret; if (!im) { return false; } ret = tevent_common_invoke_immediate_handler(im, NULL); if (ret != 0) { tevent_abort(ev, "tevent_common_invoke_immediate_handler() failed"); } return true; } void tevent_immediate_set_tag(struct tevent_immediate *im, uint64_t tag) { if (im == NULL) { return; } im->tag = tag; } uint64_t tevent_immediate_get_tag(const struct tevent_immediate *im) { if (im == NULL) { return 0; } return im->tag; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_internal.h0000660000000000000000000003447300000000000016420 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. generalised event loop handling INTERNAL STRUCTS. THERE ARE NO API GUARANTEES. External users should only ever have to include this header when implementing new tevent backends. Copyright (C) Stefan Metzmacher 2005-2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ struct tevent_req { /** * @brief What to do on completion * * This is used for the user of an async request, fn is called when * the request completes, either successfully or with an error. */ struct { /** * @brief Completion function * Completion function, to be filled by the API user */ tevent_req_fn fn; /** * @brief Private data for the completion function */ void *private_data; } async; /** * @brief Private state pointer for the actual implementation * * The implementation doing the work for the async request needs to * keep around current data like for example a fd event. The user of * an async request should not touch this. */ void *data; /** * @brief A function to overwrite the default print function * * The implementation doing the work may want to implement a * custom function to print the text representation of the async * request. */ tevent_req_print_fn private_print; /** * @brief A function to cancel the request * * The implementation might want to set a function * that is called when the tevent_req_cancel() function * was called. 
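	 *
	 * As a rough sketch (my_state and my_req_cancel are hypothetical
	 * names), an implementation typically registers the function right
	 * after tevent_req_create():
	 *
	 * @code
	 * static bool my_req_cancel(struct tevent_req *req)
	 * {
	 *	struct my_state *state =
	 *		tevent_req_data(req, struct my_state);
	 *
	 *	// abort whatever is still in flight in state, then:
	 *	tevent_req_error(req, ECANCELED);
	 *	return true;
	 * }
	 *
	 * ...
	 * tevent_req_set_cancel_fn(req, my_req_cancel);
	 * @endcode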
*/ tevent_req_cancel_fn private_cancel; /** * @brief A function to cleanup the request * * The implementation might want to set a function * that is called before the tevent_req_done() and tevent_req_error() * trigger the callers callback function. */ struct { tevent_req_cleanup_fn fn; enum tevent_req_state state; } private_cleanup; /** * @brief Internal state of the request * * Callers should only access this via functions and never directly. */ struct { /** * @brief The talloc type of the data pointer * * This is filled by the tevent_req_create() macro. * * This for debugging only. */ const char *private_type; /** * @brief The location where the request was created * * This uses the __location__ macro via the tevent_req_create() * macro. * * This for debugging only. */ const char *create_location; /** * @brief The location where the request was finished * * This uses the __location__ macro via the tevent_req_done(), * tevent_req_error() or tevent_req_nomem() macro. * * This for debugging only. */ const char *finish_location; /** * @brief The location where the request was canceled * * This uses the __location__ macro via the * tevent_req_cancel() macro. * * This for debugging only. */ const char *cancel_location; /** * @brief The external state - will be queried by the caller * * While the async request is being processed, state will remain in * TEVENT_REQ_IN_PROGRESS. A request is finished if * req->state>=TEVENT_REQ_DONE. */ enum tevent_req_state state; /** * @brief status code when finished * * This status can be queried in the async completion function. It * will be set to 0 when everything went fine. */ uint64_t error; /** * @brief the immediate event used by tevent_req_post * */ struct tevent_immediate *trigger; /** * @brief An event context which will be used to * defer the _tevent_req_notify_callback(). */ struct tevent_context *defer_callback_ev; /** * @brief the timer event if tevent_req_set_endtime was used * */ struct tevent_timer *timer; /** * @brief The place where profiling data is kept */ struct tevent_req_profile *profile; } internal; }; struct tevent_req_profile { struct tevent_req_profile *prev, *next; struct tevent_req_profile *parent; const char *req_name; pid_t pid; const char *start_location; struct timeval start_time; const char *stop_location; struct timeval stop_time; enum tevent_req_state state; uint64_t user_error; struct tevent_req_profile *subprofiles; }; struct tevent_fd { struct tevent_fd *prev, *next; struct tevent_context *event_ctx; struct tevent_wrapper_glue *wrapper; bool busy; bool destroyed; int fd; uint16_t flags; /* see TEVENT_FD_* flags */ tevent_fd_handler_t handler; tevent_fd_close_fn_t close_fn; /* this is private for the specific handler */ void *private_data; /* this is for debugging only! */ const char *handler_name; const char *location; /* this is private for the events_ops implementation */ uint64_t additional_flags; void *additional_data; /* custom tag that can be set by caller */ uint64_t tag; }; struct tevent_timer { struct tevent_timer *prev, *next; struct tevent_context *event_ctx; struct tevent_wrapper_glue *wrapper; bool busy; bool destroyed; struct timeval next_event; tevent_timer_handler_t handler; /* this is private for the specific handler */ void *private_data; /* this is for debugging only! 
*/ const char *handler_name; const char *location; /* this is private for the events_ops implementation */ void *additional_data; /* custom tag that can be set by caller */ uint64_t tag; }; struct tevent_immediate { struct tevent_immediate *prev, *next; struct tevent_context *event_ctx; struct tevent_wrapper_glue *wrapper; bool busy; bool destroyed; struct tevent_context *detach_ev_ctx; tevent_immediate_handler_t handler; /* this is private for the specific handler */ void *private_data; /* this is for debugging only! */ const char *handler_name; const char *create_location; const char *schedule_location; /* this is private for the events_ops implementation */ void (*cancel_fn)(struct tevent_immediate *im); void *additional_data; /* custom tag that can be set by caller */ uint64_t tag; }; struct tevent_signal { struct tevent_signal *prev, *next; struct tevent_context *event_ctx; struct tevent_wrapper_glue *wrapper; bool busy; bool destroyed; int signum; int sa_flags; tevent_signal_handler_t handler; /* this is private for the specific handler */ void *private_data; /* this is for debugging only! */ const char *handler_name; const char *location; /* this is private for the events_ops implementation */ void *additional_data; /* custom tag that can be set by caller */ uint64_t tag; }; struct tevent_threaded_context { struct tevent_threaded_context *next, *prev; #ifdef HAVE_PTHREAD pthread_mutex_t event_ctx_mutex; #endif struct tevent_context *event_ctx; }; struct tevent_debug_ops { void (*debug)(void *context, enum tevent_debug_level level, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0); void *context; }; void tevent_debug(struct tevent_context *ev, enum tevent_debug_level level, const char *fmt, ...) PRINTF_ATTRIBUTE(3,4); void tevent_abort(struct tevent_context *ev, const char *reason); void tevent_common_check_double_free(TALLOC_CTX *ptr, const char *reason); struct tevent_context { /* the specific events implementation */ const struct tevent_ops *ops; /* * The following three pointers are queried on every loop_once * in the order in which they appear here. Not measured, but * hopefully putting them at the top together with "ops" * should make tevent a *bit* more cache-friendly than before. 
*/ /* list of signal events - used by common code */ struct tevent_signal *signal_events; /* List of threaded job indicators */ struct tevent_threaded_context *threaded_contexts; /* list of immediate events - used by common code */ struct tevent_immediate *immediate_events; /* list of fd events - used by common code */ struct tevent_fd *fd_events; /* list of timed events - used by common code */ struct tevent_timer *timer_events; /* List of scheduled immediates */ pthread_mutex_t scheduled_mutex; struct tevent_immediate *scheduled_immediates; /* this is private for the events_ops implementation */ void *additional_data; /* pipe hack used with signal handlers */ struct tevent_fd *wakeup_fde; int wakeup_fd; /* fd to write into */ #ifndef HAVE_EVENT_FD int wakeup_read_fd; #endif /* debugging operations */ struct tevent_debug_ops debug_ops; /* info about the nesting status */ struct { bool allowed; uint32_t level; tevent_nesting_hook hook_fn; void *hook_private; } nesting; struct { struct { tevent_trace_callback_t callback; void *private_data; } point; struct { tevent_trace_fd_callback_t callback; void *private_data; } fde; struct { tevent_trace_signal_callback_t callback; void *private_data; } se; struct { tevent_trace_timer_callback_t callback; void *private_data; } te; struct { tevent_trace_immediate_callback_t callback; void *private_data; } im; } tracing; struct { /* * This is used on the main event context */ struct tevent_wrapper_glue *list; /* * This is used on the wrapper event context */ struct tevent_wrapper_glue *glue; } wrapper; /* * an optimization pointer into timer_events * used by used by common code via * tevent_common_add_timer_v2() */ struct tevent_timer *last_zero_timer; #ifdef HAVE_PTHREAD struct tevent_context *prev, *next; #endif }; const struct tevent_ops *tevent_find_ops_byname(const char *name); int tevent_common_context_destructor(struct tevent_context *ev); int tevent_common_loop_wait(struct tevent_context *ev, const char *location); int tevent_common_fd_destructor(struct tevent_fd *fde); struct tevent_fd *tevent_common_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location); void tevent_common_fd_set_close_fn(struct tevent_fd *fde, tevent_fd_close_fn_t close_fn); uint16_t tevent_common_fd_get_flags(struct tevent_fd *fde); void tevent_common_fd_set_flags(struct tevent_fd *fde, uint16_t flags); int tevent_common_invoke_fd_handler(struct tevent_fd *fde, uint16_t flags, bool *removed); struct tevent_timer *tevent_common_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location); struct tevent_timer *tevent_common_add_timer_v2(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location); struct timeval tevent_common_loop_timer_delay(struct tevent_context *); int tevent_common_invoke_timer_handler(struct tevent_timer *te, struct timeval current_time, bool *removed); void tevent_common_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location); int tevent_common_invoke_immediate_handler(struct tevent_immediate *im, bool *removed); bool tevent_common_loop_immediate(struct tevent_context *ev); void 
tevent_common_threaded_activate_immediate(struct tevent_context *ev); bool tevent_common_have_events(struct tevent_context *ev); int tevent_common_wakeup_init(struct tevent_context *ev); int tevent_common_wakeup_fd(int fd); int tevent_common_wakeup(struct tevent_context *ev); struct tevent_signal *tevent_common_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location); int tevent_common_check_signal(struct tevent_context *ev); void tevent_cleanup_pending_signal_handlers(struct tevent_signal *se); int tevent_common_invoke_signal_handler(struct tevent_signal *se, int signum, int count, void *siginfo, bool *removed); struct tevent_context *tevent_wrapper_main_ev(struct tevent_context *ev); struct tevent_wrapper_ops; struct tevent_wrapper_glue { struct tevent_wrapper_glue *prev, *next; struct tevent_context *wrap_ev; struct tevent_context *main_ev; bool busy; bool destroyed; const struct tevent_wrapper_ops *ops; void *private_state; }; void tevent_wrapper_push_use_internal(struct tevent_context *ev, struct tevent_wrapper_glue *wrapper); void tevent_wrapper_pop_use_internal(const struct tevent_context *__ev_ptr, struct tevent_wrapper_glue *wrapper); bool tevent_standard_init(void); bool tevent_poll_init(void); bool tevent_poll_event_add_fd_internal(struct tevent_context *ev, struct tevent_fd *fde); bool tevent_poll_mt_init(void); #ifdef HAVE_EPOLL bool tevent_epoll_init(void); void tevent_epoll_set_panic_fallback(struct tevent_context *ev, bool (*panic_fallback)(struct tevent_context *ev, bool replay)); #endif #ifdef HAVE_SOLARIS_PORTS bool tevent_port_init(void); #endif void tevent_trace_point_callback(struct tevent_context *ev, enum tevent_trace_point); void tevent_trace_fd_callback(struct tevent_context *ev, struct tevent_fd *fde, enum tevent_event_trace_point); void tevent_trace_signal_callback(struct tevent_context *ev, struct tevent_signal *se, enum tevent_event_trace_point); void tevent_trace_timer_callback(struct tevent_context *ev, struct tevent_timer *te, enum tevent_event_trace_point); void tevent_trace_immediate_callback(struct tevent_context *ev, struct tevent_immediate *im, enum tevent_event_trace_point); ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_liboop.c0000660000000000000000000001576000000000000016061 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. main select loop and event handling wrapper for http://git.lysator.liu.se/liboop/ Copyright (C) Stefan Metzmacher 2005 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "events.h" #include "events_internal.h" #include /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
NOTE: this code compiles fine, but is completely *UNTESTED* and is only committed as an example !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */ static int oop_event_context_destructor(struct tevent_context *ev) { oop_source_sys *oop_sys = ev->additional_data; oop_sys_delete(oop_sys); return 0; } /* create a oop_event_context structure. */ static int oop_event_context_init(struct tevent_context *ev, void *private_data) { oop_source_sys *oop_sys = private_data; if (!oop_sys) { oop_sys = oop_sys_new(); if (!oop_sys) { return -1; } talloc_set_destructor(ev, oop_event_context_destructor); } ev->additional_data = oop_sys; return 0; } static void *oop_event_fd_handler(oop_source *oop, int fd, oop_event oop_type, void *ptr) { struct tevent_fd *fde = ptr; if (fd != fde->fd) return OOP_ERROR; switch(oop_type) { case OOP_READ: fde->handler(fde->event_ctx, fde, EVENT_FD_READ, fde->private_data); return OOP_CONTINUE; case OOP_WRITE: fde->handler(fde->event_ctx, fde, EVENT_FD_WRITE, fde->private_data); return OOP_CONTINUE; case OOP_EXCEPTION: return OOP_ERROR; case OOP_NUM_EVENTS: return OOP_ERROR; } return OOP_ERROR; } /* destroy an fd_event */ static int oop_event_fd_destructor(struct tevent_fd *fde) { struct tevent_context *ev = fde->event_ctx; oop_source_sys *oop_sys = ev->additional_data; oop_source *oop = oop_sys_source(oop_sys); if (fde->flags & EVENT_FD_READ) oop->cancel_fd(oop, fde->fd, OOP_READ); if (fde->flags & EVENT_FD_WRITE) oop->cancel_fd(oop, fde->fd, OOP_WRITE); if (fde->flags & EVENT_FD_AUTOCLOSE) { close(fde->fd); fde->fd = -1; } return 0; } /* add a fd based event return NULL on failure (memory allocation error) */ static struct tevent_fd *oop_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, event_fd_handler_t handler, void *private_data) { struct tevent_fd *fde; oop_source_sys *oop_sys = ev->additional_data; oop_source *oop = oop_sys_source(oop_sys); fde = talloc(mem_ctx?mem_ctx:ev, struct tevent_fd); if (!fde) return NULL; fde->event_ctx = ev; fde->fd = fd; fde->flags = flags; fde->handler = handler; fde->private_data = private_data; fde->additional_flags = 0; fde->additional_data = NULL; if (fde->flags & EVENT_FD_READ) oop->on_fd(oop, fde->fd, OOP_READ, oop_event_fd_handler, fde); if (fde->flags & EVENT_FD_WRITE) oop->on_fd(oop, fde->fd, OOP_WRITE, oop_event_fd_handler, fde); talloc_set_destructor(fde, oop_event_fd_destructor); return fde; } /* return the fd event flags */ static uint16_t oop_event_get_fd_flags(struct tevent_fd *fde) { return fde->flags; } /* set the fd event flags */ static void oop_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags) { oop_source_sys *oop_sys; oop_source *oop; oop_sys = fde->event_ctx->additional_data; oop = oop_sys_source(oop_sys); if ((fde->flags & EVENT_FD_READ)&&(!(flags & EVENT_FD_READ))) oop->cancel_fd(oop, fde->fd, OOP_READ); if ((!(fde->flags & EVENT_FD_READ))&&(flags & EVENT_FD_READ)) oop->on_fd(oop, fde->fd, OOP_READ, oop_event_fd_handler, fde); if ((fde->flags & EVENT_FD_WRITE)&&(!(flags & EVENT_FD_WRITE))) oop->cancel_fd(oop, fde->fd, OOP_WRITE); if ((!(fde->flags & EVENT_FD_WRITE))&&(flags & EVENT_FD_WRITE)) oop->on_fd(oop, fde->fd, OOP_WRITE, oop_event_fd_handler, fde); fde->flags = flags; } static int oop_event_timed_destructor(struct tevent_timer *te); static int oop_event_timed_deny_destructor(struct tevent_timer *te) { return -1; } static void *oop_event_timed_handler(oop_source *oop, struct timeval t, void *ptr) { struct tevent_timer *te = ptr; /* deny the handler to free the event */ 
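/*
 * A clarifying note on the trick used here: oop_event_timed_deny_destructor()
 * returns -1, which makes any talloc_free() of the event from within the
 * user handler fail.  After the handler returns, the real destructor is
 * restored and the event is freed below.
 */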
talloc_set_destructor(te, oop_event_timed_deny_destructor); te->handler(te->event_ctx, te, t, te->private_data); talloc_set_destructor(te, oop_event_timed_destructor); talloc_free(te); return OOP_CONTINUE; } /* destroy a timed event */ static int oop_event_timed_destructor(struct tevent_timer *te) { struct tevent_context *ev = te->event_ctx; oop_source_sys *oop_sys = ev->additional_data; oop_source *oop = oop_sys_source(oop_sys); oop->cancel_time(oop, te->next_event, oop_event_timed_handler, te); return 0; } /* add a timed event return NULL on failure (memory allocation error) */ static struct tevent_timer *oop_event_add_timed(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, event_timed_handler_t handler, void *private_data) { oop_source_sys *oop_sys = ev->additional_data; oop_source *oop = oop_sys_source(oop_sys); struct tevent_timer *te; te = talloc(mem_ctx?mem_ctx:ev, struct tevent_timer); if (te == NULL) return NULL; te->event_ctx = ev; te->next_event = next_event; te->handler = handler; te->private_data = private_data; te->additional_data = NULL; oop->on_time(oop, te->next_event, oop_event_timed_handler, te); talloc_set_destructor(te, oop_event_timed_destructor); return te; } /* do a single event loop using the events defined in ev */ static int oop_event_loop_once(struct tevent_context *ev) { void *oop_ret; oop_source_sys *oop_sys = ev->additional_data; oop_ret = oop_sys_run_once(oop_sys); if (oop_ret == OOP_CONTINUE) { return 0; } return -1; } /* return on failure or (with 0) if all fd events are removed */ static int oop_event_loop_wait(struct tevent_context *ev) { void *oop_ret; oop_source_sys *oop_sys = ev->additional_data; oop_ret = oop_sys_run(oop_sys); if (oop_ret == OOP_CONTINUE) { return 0; } return -1; } static const struct event_ops event_oop_ops = { .context_init = oop_event_context_init, .add_fd = oop_event_add_fd, .get_fd_flags = oop_event_get_fd_flags, .set_fd_flags = oop_event_set_fd_flags, .add_timer = oop_event_add_timed, .add_signal = common_event_add_signal, .loop_once = oop_event_loop_once, .loop_wait = oop_event_loop_wait, }; const struct event_ops *event_liboop_get_ops(void) { return &event_oop_ops; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_poll.c0000660000000000000000000003610600000000000015540 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. main select loop and event handling Copyright (C) Andrew Tridgell 2003-2005 Copyright (C) Stefan Metzmacher 2005-2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "system/filesys.h" #include "system/select.h" #include "tevent.h" #include "tevent_util.h" #include "tevent_internal.h" struct poll_event_context { /* a pointer back to the generic event_context */ struct tevent_context *ev; /* * one or more events were deleted or disabled */ bool deleted; /* * These two arrays are maintained together. * * The following is always true: * num_fds <= num_fdes * * new 'fresh' elements are added at the end * of the 'fdes' array and picked up later * to the 'fds' array in poll_event_sync_arrays() * before the poll() syscall. */ struct pollfd *fds; size_t num_fds; struct tevent_fd **fdes; size_t num_fdes; /* * use tevent_common_wakeup(ev) to wake the poll() thread */ bool use_mt_mode; }; /* create a poll_event_context structure. */ static int poll_event_context_init(struct tevent_context *ev) { struct poll_event_context *poll_ev; /* * we might be called during tevent_re_initialise() * which means we need to free our old additional_data * in order to detach old fd events from the * poll_ev->fresh list */ TALLOC_FREE(ev->additional_data); poll_ev = talloc_zero(ev, struct poll_event_context); if (poll_ev == NULL) { return -1; } poll_ev->ev = ev; ev->additional_data = poll_ev; return 0; } static int poll_event_context_init_mt(struct tevent_context *ev) { struct poll_event_context *poll_ev; int ret; ret = poll_event_context_init(ev); if (ret == -1) { return ret; } poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); ret = tevent_common_wakeup_init(ev); if (ret != 0) { return ret; } poll_ev->use_mt_mode = true; return 0; } static void poll_event_wake_pollthread(struct poll_event_context *poll_ev) { if (!poll_ev->use_mt_mode) { return; } tevent_common_wakeup(poll_ev->ev); } /* destroy an fd_event */ static int poll_event_fd_destructor(struct tevent_fd *fde) { struct tevent_context *ev = fde->event_ctx; struct poll_event_context *poll_ev; uint64_t del_idx = fde->additional_flags; if (ev == NULL) { goto done; } poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); if (del_idx == UINT64_MAX) { goto done; } poll_ev->fdes[del_idx] = NULL; poll_ev->deleted = true; poll_event_wake_pollthread(poll_ev); done: return tevent_common_fd_destructor(fde); } static void poll_event_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct poll_event_context *poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); tevent_common_schedule_immediate(im, ev, handler, private_data, handler_name, location); poll_event_wake_pollthread(poll_ev); } /* Private function called by "standard" backend fallback. Note this only allows fallback to "poll" backend, not "poll-mt". */ _PRIVATE_ bool tevent_poll_event_add_fd_internal(struct tevent_context *ev, struct tevent_fd *fde) { struct poll_event_context *poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); uint64_t fde_idx = UINT64_MAX; size_t num_fdes; fde->additional_flags = UINT64_MAX; talloc_set_destructor(fde, poll_event_fd_destructor); if (fde->flags == 0) { /* * Nothing more to do... */ return true; } /* * We need to add it to the end of the 'fdes' array. 
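 * The slot index is stored in fde->additional_flags so that
 * poll_event_fd_destructor() and poll_event_set_fd_flags() can find
 * the entry again, and poll_event_sync_arrays() mirrors it into the
 * pollfd array before the next poll() call.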
*/ num_fdes = poll_ev->num_fdes + 1; if (num_fdes > talloc_array_length(poll_ev->fdes)) { struct tevent_fd **tmp_fdes = NULL; size_t array_length; array_length = (num_fdes + 15) & ~15; /* round up to 16 */ tmp_fdes = talloc_realloc(poll_ev, poll_ev->fdes, struct tevent_fd *, array_length); if (tmp_fdes == NULL) { return false; } poll_ev->fdes = tmp_fdes; } fde_idx = poll_ev->num_fdes; fde->additional_flags = fde_idx; poll_ev->fdes[fde_idx] = fde; poll_ev->num_fdes++; return true; } /* add a fd based event return NULL on failure (memory allocation error) */ static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct poll_event_context *poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); struct tevent_fd *fde; bool ok; if (fd < 0) { return NULL; } fde = tevent_common_add_fd(ev, mem_ctx, fd, flags, handler, private_data, handler_name, location); if (fde == NULL) { return NULL; } ok = tevent_poll_event_add_fd_internal(ev, fde); if (!ok) { TALLOC_FREE(fde); return NULL; } poll_event_wake_pollthread(poll_ev); /* * poll_event_loop_poll will take care of the rest in * poll_event_setup_fresh */ return fde; } /* set the fd event flags */ static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags) { struct tevent_context *ev = fde->event_ctx; struct poll_event_context *poll_ev; uint64_t idx = fde->additional_flags; uint16_t pollflags; if (ev == NULL) { return; } if (fde->flags == flags) { return; } poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); fde->flags = flags; if (idx == UINT64_MAX) { /* * We move it between the fresh and disabled lists. */ tevent_poll_event_add_fd_internal(ev, fde); poll_event_wake_pollthread(poll_ev); return; } if (fde->flags == 0) { /* * We need to remove it from the array * and move it to the disabled list. */ poll_ev->fdes[idx] = NULL; poll_ev->deleted = true; fde->additional_flags = UINT64_MAX; poll_event_wake_pollthread(poll_ev); return; } if (idx >= poll_ev->num_fds) { /* * Not yet added to the * poll_ev->fds array. */ poll_event_wake_pollthread(poll_ev); return; } pollflags = 0; if (flags & TEVENT_FD_READ) { pollflags |= (POLLIN|POLLHUP); } if (flags & TEVENT_FD_WRITE) { pollflags |= (POLLOUT); } poll_ev->fds[idx].events = pollflags; poll_event_wake_pollthread(poll_ev); } static bool poll_event_sync_arrays(struct tevent_context *ev, struct poll_event_context *poll_ev) { size_t i; size_t array_length; if (poll_ev->deleted) { for (i=0; i < poll_ev->num_fds;) { struct tevent_fd *fde = poll_ev->fdes[i]; size_t ci; if (fde != NULL) { i++; continue; } /* * This fde was talloc_free()'ed. Delete it * from the arrays */ poll_ev->num_fds -= 1; ci = poll_ev->num_fds; if (ci > i) { poll_ev->fds[i] = poll_ev->fds[ci]; poll_ev->fdes[i] = poll_ev->fdes[ci]; if (poll_ev->fdes[i] != NULL) { poll_ev->fdes[i]->additional_flags = i; } } poll_ev->fds[ci] = (struct pollfd) { .fd = -1 }; poll_ev->fdes[ci] = NULL; } poll_ev->deleted = false; } if (poll_ev->num_fds == poll_ev->num_fdes) { return true; } /* * Recheck the size of both arrays and make sure * poll_fd->fds array has at least the size of the * in use poll_ev->fdes array. */ if (poll_ev->num_fdes > talloc_array_length(poll_ev->fds)) { struct pollfd *tmp_fds = NULL; /* * Make sure both allocated the same length. 
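 * Growing poll_ev->fds to talloc_array_length(poll_ev->fdes)
 * guarantees it can hold every in-use fdes entry.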
*/ array_length = talloc_array_length(poll_ev->fdes); tmp_fds = talloc_realloc(poll_ev, poll_ev->fds, struct pollfd, array_length); if (tmp_fds == NULL) { return false; } poll_ev->fds = tmp_fds; } /* * Now setup the new elements. */ for (i = poll_ev->num_fds; i < poll_ev->num_fdes; i++) { struct tevent_fd *fde = poll_ev->fdes[i]; struct pollfd *pfd = &poll_ev->fds[poll_ev->num_fds]; if (fde == NULL) { continue; } if (i > poll_ev->num_fds) { poll_ev->fdes[poll_ev->num_fds] = fde; fde->additional_flags = poll_ev->num_fds; poll_ev->fdes[i] = NULL; } pfd->fd = fde->fd; pfd->events = 0; pfd->revents = 0; if (fde->flags & TEVENT_FD_READ) { pfd->events |= (POLLIN|POLLHUP); } if (fde->flags & TEVENT_FD_WRITE) { pfd->events |= (POLLOUT); } poll_ev->num_fds += 1; } /* Both are in sync again */ poll_ev->num_fdes = poll_ev->num_fds; /* * Check if we should shrink the arrays * But keep at least 16 elements. */ array_length = (poll_ev->num_fds + 15) & ~15; /* round up to 16 */ array_length = MAX(array_length, 16); if (array_length < talloc_array_length(poll_ev->fdes)) { struct tevent_fd **tmp_fdes = NULL; struct pollfd *tmp_fds = NULL; tmp_fdes = talloc_realloc(poll_ev, poll_ev->fdes, struct tevent_fd *, array_length); if (tmp_fdes == NULL) { return false; } poll_ev->fdes = tmp_fdes; tmp_fds = talloc_realloc(poll_ev, poll_ev->fds, struct pollfd, array_length); if (tmp_fds == NULL) { return false; } poll_ev->fds = tmp_fds; } return true; } /* event loop handling using poll() */ static int poll_event_loop_poll(struct tevent_context *ev, struct timeval *tvalp) { struct poll_event_context *poll_ev = talloc_get_type_abort( ev->additional_data, struct poll_event_context); int pollrtn; int timeout = -1; int poll_errno; struct tevent_fd *fde = NULL; struct tevent_fd *next = NULL; unsigned i; bool ok; if (ev->signal_events && tevent_common_check_signal(ev)) { return 0; } if (tvalp != NULL) { timeout = tvalp->tv_sec * 1000; timeout += (tvalp->tv_usec + 999) / 1000; } ok = poll_event_sync_arrays(ev, poll_ev); if (!ok) { return -1; } tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT); pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout); poll_errno = errno; tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT); if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) { tevent_common_check_signal(ev); return 0; } if (pollrtn == 0 && tvalp) { /* we don't care about a possible delay here */ tevent_common_loop_timer_delay(ev); return 0; } if (pollrtn <= 0) { /* * No fd's ready */ return 0; } /* at least one file descriptor is ready - check which ones and call the handler, being careful to allow the handler to remove itself when called */ for (fde = ev->fd_events; fde; fde = next) { uint64_t idx = fde->additional_flags; struct pollfd *pfd; uint16_t flags = 0; next = fde->next; if (idx == UINT64_MAX) { continue; } pfd = &poll_ev->fds[idx]; if (pfd->revents & POLLNVAL) { /* * the socket is dead! this should never * happen as the socket should have first been * made readable and that should have removed * the event, so this must be a bug. * * We ignore it here to match the epoll * behavior. 
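 * Instead we log the problem, detach the fde from the event
 * context and mark the slot as deleted so that
 * poll_event_sync_arrays() reclaims it on the next run.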
*/ tevent_debug(ev, TEVENT_DEBUG_ERROR, "POLLNVAL on fde[%p] fd[%d] - disabling\n", fde, pfd->fd); poll_ev->fdes[idx] = NULL; poll_ev->deleted = true; DLIST_REMOVE(ev->fd_events, fde); fde->wrapper = NULL; fde->event_ctx = NULL; continue; } if (pfd->revents & (POLLHUP|POLLERR)) { /* If we only wait for TEVENT_FD_WRITE, we should not tell the event handler about it, and remove the writable flag, as we only report errors when waiting for read events to match the select behavior. */ if (!(fde->flags & TEVENT_FD_READ)) { TEVENT_FD_NOT_WRITEABLE(fde); continue; } flags |= TEVENT_FD_READ; } if (pfd->revents & POLLIN) { flags |= TEVENT_FD_READ; } if (pfd->revents & POLLOUT) { flags |= TEVENT_FD_WRITE; } /* * Note that fde->flags could be changed when using * the poll_mt backend together with threads, * that why we need to check pfd->revents and fde->flags */ flags &= fde->flags; if (flags != 0) { DLIST_DEMOTE(ev->fd_events, fde); return tevent_common_invoke_fd_handler(fde, flags, NULL); } } for (i = 0; i < poll_ev->num_fds; i++) { if (poll_ev->fds[i].revents & POLLNVAL) { /* * the socket is dead! this should never * happen as the socket should have first been * made readable and that should have removed * the event, so this must be a bug or * a race in the poll_mt usage. */ fde = poll_ev->fdes[i]; tevent_debug(ev, TEVENT_DEBUG_WARNING, "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n", poll_ev->fds[i].fd, fde); poll_ev->fdes[i] = NULL; poll_ev->deleted = true; if (fde != NULL) { DLIST_REMOVE(ev->fd_events, fde); fde->wrapper = NULL; fde->event_ctx = NULL; } } } return 0; } /* do a single event loop using the events defined in ev */ static int poll_event_loop_once(struct tevent_context *ev, const char *location) { struct timeval tval; if (ev->signal_events && tevent_common_check_signal(ev)) { return 0; } if (ev->threaded_contexts != NULL) { tevent_common_threaded_activate_immediate(ev); } if (ev->immediate_events && tevent_common_loop_immediate(ev)) { return 0; } tval = tevent_common_loop_timer_delay(ev); if (tevent_timeval_is_zero(&tval)) { return 0; } return poll_event_loop_poll(ev, &tval); } static const struct tevent_ops poll_event_ops = { .context_init = poll_event_context_init, .add_fd = poll_event_add_fd, .set_fd_close_fn = tevent_common_fd_set_close_fn, .get_fd_flags = tevent_common_fd_get_flags, .set_fd_flags = poll_event_set_fd_flags, .add_timer = tevent_common_add_timer_v2, .schedule_immediate = tevent_common_schedule_immediate, .add_signal = tevent_common_add_signal, .loop_once = poll_event_loop_once, .loop_wait = tevent_common_loop_wait, }; _PRIVATE_ bool tevent_poll_init(void) { return tevent_register_backend("poll", &poll_event_ops); } static const struct tevent_ops poll_event_mt_ops = { .context_init = poll_event_context_init_mt, .add_fd = poll_event_add_fd, .set_fd_close_fn = tevent_common_fd_set_close_fn, .get_fd_flags = tevent_common_fd_get_flags, .set_fd_flags = poll_event_set_fd_flags, .add_timer = tevent_common_add_timer_v2, .schedule_immediate = poll_event_schedule_immediate, .add_signal = tevent_common_add_signal, .loop_once = poll_event_loop_once, .loop_wait = tevent_common_loop_wait, }; _PRIVATE_ bool tevent_poll_mt_init(void) { return tevent_register_backend("poll_mt", &poll_event_mt_ops); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_port.c0000660000000000000000000004766200000000000015567 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. 
Main select loop and event handling - Solaris port implementation. Losely based on the Linux epoll backend. Copyright (C) Jeremy Allison 2013 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/select.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" struct port_associate_vals { struct port_associate_vals *prev, *next; struct port_event_context *port_ev; int events; struct tevent_fd *fde; bool associated_event; }; struct port_event_context { /* a pointer back to the generic event_context */ struct tevent_context *ev; /* This is the handle from port_create */ int port_fd; pid_t pid; /* List of associations. */ struct port_associate_vals *po_vals; }; #define PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION (1<<0) #define PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1) #define PORT_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2) #define PORT_ADDITIONAL_FD_FLAG_HAS_MPX (1<<3) /* Map from TEVENT_FD_* to POLLIN/POLLOUT */ static int port_map_flags(uint16_t flags) { int ret = 0; if (flags & TEVENT_FD_READ) ret |= (POLLIN | POLLERR | POLLHUP); if (flags & TEVENT_FD_WRITE) ret |= (POLLOUT | POLLERR | POLLHUP); return ret; } /* Free the port fd */ static int port_ctx_destructor(struct port_event_context *port_ev) { close(port_ev->port_fd); port_ev->port_fd = -1; return 0; } /* Init the port fd */ static int port_init_ctx(struct port_event_context *port_ev) { port_ev->port_fd = port_create(); if (port_ev->port_fd == -1) { tevent_debug(port_ev->ev, TEVENT_DEBUG_FATAL, "Failed to create port handle.\n"); return -1; } if (!ev_set_close_on_exec(port_ev->port_fd)) { tevent_debug(port_ev->ev, TEVENT_DEBUG_WARNING, "Failed to set close-on-exec, file descriptor may be leaked to children.\n"); } port_ev->pid = getpid(); talloc_set_destructor(port_ev, port_ctx_destructor); return 0; } /* Functions to manage the lower level cache of associated events on the port_fd. */ static int port_associate_vals_destructor(struct port_associate_vals *val) { DLIST_REMOVE(val->port_ev->po_vals, val); memset(val, '\0', sizeof(struct port_associate_vals)); return 0; } /* * TODO: As the port_association is per-fde, it should be possible to store it * directly in fde->additional_data, alongside any multiplexed-fde. That way the * lookup on store and delete would be avoided, and associate_all_events() could * walk the ev->fd_events list. */ static bool store_port_association(struct port_event_context *port_ev, struct tevent_fd *fde, int events) { struct port_associate_vals *val; for (val = port_ev->po_vals; val; val = val->next) { if (val->fde->fd == fde->fd) { /* Association already attached to fd. 
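 * If the requested event mask changed, update it and clear
 * associated_event so that associate_all_events() re-associates
 * the fd before the next port_getn().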
*/ if (val->events != events) { val->events = events; val->associated_event = false; } return true; } } val = talloc_zero(port_ev, struct port_associate_vals); if (val == NULL) { return false; } val->port_ev = port_ev; val->fde = fde; val->events = events; val->associated_event = false; DLIST_ADD(port_ev->po_vals, val); talloc_set_destructor(val, port_associate_vals_destructor); return true; } static void delete_port_association(struct port_event_context *port_ev, struct tevent_fd *fde) { struct port_associate_vals *val; for (val = port_ev->po_vals; val; val = val->next) { if (val->fde == fde) { if (val->associated_event) { (void)port_dissociate(port_ev->port_fd, PORT_SOURCE_FD, fde->fd); } talloc_free(val); return; } } } static int associate_all_events(struct port_event_context *port_ev) { struct port_associate_vals *val; for (val = port_ev->po_vals; val; val = val->next) { int ret; if (val->associated_event) { continue; } ret = port_associate(port_ev->port_fd, PORT_SOURCE_FD, (uintptr_t)val->fde->fd, val->events, (void *)val); if (ret != 0) { return -1; } val->associated_event = true; } return 0; } static int port_update_event(struct port_event_context *port_ev, struct tevent_fd *fde); /* Reopen the port handle when our pid changes. */ static int port_check_reopen(struct port_event_context *port_ev) { struct tevent_fd *fde; if (port_ev->pid == getpid()) { return 0; } close(port_ev->port_fd); port_ev->port_fd = port_create(); if (port_ev->port_fd == -1) { tevent_debug(port_ev->ev, TEVENT_DEBUG_FATAL, "port_create() failed"); return -1; } if (!ev_set_close_on_exec(port_ev->port_fd)) { tevent_debug(port_ev->ev, TEVENT_DEBUG_WARNING, "Failed to set close-on-exec, file descriptor may be leaked to children.\n"); } port_ev->pid = getpid(); for (fde=port_ev->ev->fd_events;fde;fde=fde->next) { fde->additional_flags &= PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; if (port_update_event(port_ev, fde) != 0) { return -1; } } return 0; } /* * Solaris ports cannot add the same file descriptor twice, once * with read, once with write which is allowed by the tevent backend. * Multiplex the existing fde, flag it as such so we can search for the * correct fde on event triggering. */ static void port_setup_multiplex_fd(struct port_event_context *port_ev, struct tevent_fd *add_fde, struct tevent_fd *mpx_fde) { /* * Make each fde->additional_data pointers point at each other * so we can look them up from each other. They are now paired. */ mpx_fde->additional_data = add_fde; add_fde->additional_data = mpx_fde; /* Now flag both fde's as being multiplexed. */ mpx_fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_HAS_MPX; add_fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_HAS_MPX; /* We need to keep the GOT_ERROR flag. */ if (mpx_fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_GOT_ERROR) { add_fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_GOT_ERROR; } } /* Add the port event to the given fd_event, Or modify an existing event. */ static int port_add_event(struct port_event_context *port_ev, struct tevent_fd *fde) { int flags = port_map_flags(fde->flags); struct tevent_fd *mpx_fde = NULL; fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR; if (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * This is already a multiplexed fde, we need to include both * flags in the modified event. 
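 * A file descriptor can only carry a single port association, so
 * the flags of both fde's are OR'ed into one event mask here.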
*/ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); mpx_fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; mpx_fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR; flags |= port_map_flags(mpx_fde->flags); } else { /* * Not (yet) a multiplexed event. See if there * is already an event with the same fd. */ for (mpx_fde = port_ev->ev->fd_events; mpx_fde; mpx_fde = mpx_fde->next) { if (mpx_fde->fd != fde->fd) { continue; } if (mpx_fde == fde) { continue; } /* Same fd. */ break; } if (mpx_fde) { if (mpx_fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_MPX) { /* Logic error. Can't have more then 2 multiplexed fde's. */ tevent_debug(port_ev->ev, TEVENT_DEBUG_FATAL, "multiplex fde for fd[%d] is already multiplexed\n", mpx_fde->fd); return -1; } flags |= port_map_flags(mpx_fde->flags); } } if (!store_port_association(port_ev, fde, flags)) { tevent_debug(port_ev->ev, TEVENT_DEBUG_FATAL, "store_port_association failed for fd[%d]\n", fde->fd); return -1; } /* Note we have an association now. */ fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; /* Only if we want to read do we tell the event handler about errors. */ if (fde->flags & TEVENT_FD_READ) { fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR; } if (mpx_fde == NULL) { return 0; } /* Set up the multiplex pointer. Does no harm if already multiplexed. */ port_setup_multiplex_fd(port_ev, fde, mpx_fde); mpx_fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; /* Only if we want to read do we tell the event handler about errors. */ if (mpx_fde->flags & TEVENT_FD_READ) { mpx_fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR; } return 0; } /* Delete the port association for the given fd_event. */ static void port_del_event(struct port_event_context *port_ev, struct tevent_fd *fde) { struct tevent_fd *mpx_fde = NULL; fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR; if (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * This is a multiplexed fde, we need to remove * both associations. */ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); mpx_fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; mpx_fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR; mpx_fde->additional_data = NULL; fde->additional_data = NULL; } delete_port_association(port_ev, fde); } /* Add or remove the port event from the given fd_event */ static int port_update_event(struct port_event_context *port_ev, struct tevent_fd *fde) { bool got_error = (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_GOT_ERROR); bool want_read = (fde->flags & TEVENT_FD_READ); bool want_write = (fde->flags & TEVENT_FD_WRITE); struct tevent_fd *mpx_fde = NULL; if (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * work out what the multiplexed fde wants. */ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); if (mpx_fde->flags & TEVENT_FD_READ) { want_read = true; } if (mpx_fde->flags & TEVENT_FD_WRITE) { want_write = true; } } if (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION) { /* There's already an association. */ if (want_read || (want_write && !got_error)) { return port_add_event(port_ev, fde); } /* * If we want to match the select behavior, we need to remove the port event * when the caller isn't interested in events. */ port_del_event(port_ev, fde); return 0; } /* There's no port event attached to the fde. 
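 * Only add one if the caller still wants to read, or wants to
 * write and no error has been seen on this fd yet.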
*/ if (want_read || (want_write && !got_error)) { return port_add_event(port_ev, fde); } return 0; } /* Cope with port_get returning EPOLLHP|EPOLLERR on an association. Return true if there's nothing else to do, false if this event needs further handling. */ static bool port_handle_hup_or_err(struct port_event_context *port_ev, struct tevent_fd *fde) { if (fde == NULL) { return true; } fde->additional_flags |= PORT_ADDITIONAL_FD_FLAG_GOT_ERROR; /* * If we only wait for TEVENT_FD_WRITE, we should not tell the * event handler about it, and remove the port association, * as we only report error when waiting for read events, * to match the select() behavior. */ if (!(fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_REPORT_ERROR)) { /* * Do the same as the poll backend and * remove the writable flag. */ fde->flags &= ~TEVENT_FD_WRITE; return true; } /* This has TEVENT_FD_READ set, we're not finished. */ return false; } /* Event loop handling using Solaris ports. */ static int port_event_loop(struct port_event_context *port_ev, struct timeval *tvalp) { int ret; #define MAXEVENTS 1 port_event_t events[MAXEVENTS]; uint_t nget = 1; uint_t max_events = MAXEVENTS; uint_t i; int port_errno; struct timespec ts; struct tevent_context *ev = port_ev->ev; if (tvalp) { ts.tv_sec = tvalp->tv_sec; ts.tv_nsec = tvalp->tv_usec * 1000; } if (port_ev->ev->signal_events && tevent_common_check_signal(ev)) { return 0; } /* * Solaris triggers sending the event to the port * at the time the port association is done. Postpone * associating fd's until just before we get the events, * otherwise we can deadlock. */ if (associate_all_events(port_ev) != 0) { return -1; } tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_WAIT); ret = port_getn(port_ev->port_fd, events, max_events, &nget, &ts); port_errno = errno; tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_WAIT); if (ret == -1 && port_errno == EINTR) { if (ev->signal_events) { tevent_common_check_signal(ev); } /* * If no signal handlers we got an unsolicited * signal wakeup. This can happen with epoll * too. Just return and ignore. */ return 0; } if (ret == -1 && port_errno == ETIME) { /* * If errno is set to ETIME it is possible that we still got an event. * In that case we need to go through the processing loop so that we * reassociate the received event with the port or the association will * be lost so check the value of nget is 0 before returning. */ if (nget == 0) { /* we don't care about a possible delay here */ tevent_common_loop_timer_delay(ev); return 0; } /* * Set the return value to 0 since we do not actually have an error and we * do have events that need to be processed. This keeps us from getting * caught in the generic error test. */ ret = 0; } if (ret == -1) { tevent_debug(ev, TEVENT_DEBUG_ERROR, "port_get failed (%s)\n", strerror(errno)); return -1; } for (i = 0; i < nget; i++) { struct tevent_fd *mpx_fde = NULL; struct tevent_fd *fde = NULL; uint16_t flags = 0; struct port_associate_vals *val = talloc_get_type(events[i].portev_user, struct port_associate_vals); if (val == NULL) { tevent_debug(ev, TEVENT_DEBUG_ERROR, "port_getn() gave bad data"); return -1; } /* Mark this event as needing to be re-associated. */ val->associated_event = false; fde = val->fde; if (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_MPX) { /* * Save off the multiplexed event in case we need * to use it to call the handler function. 
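 * If POLLHUP or POLLERR is reported, the error may have to be
 * delivered to whichever of the two fde's is waiting for
 * TEVENT_FD_READ, see the port_handle_hup_or_err() calls below.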
*/ mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); } if (events[i].portev_events & (POLLHUP|POLLERR)) { bool handled_fde = port_handle_hup_or_err(port_ev, fde); bool handled_mpx = port_handle_hup_or_err(port_ev, mpx_fde); if (handled_fde && handled_mpx) { return port_update_event(port_ev, fde); } if (!handled_mpx) { /* * If the mpx event was the one that needs * further handling, it's the TEVENT_FD_READ * event so switch over and call that handler. */ fde = mpx_fde; mpx_fde = NULL; } flags |= TEVENT_FD_READ; } if (events[i].portev_events & POLLIN) { flags |= TEVENT_FD_READ; } if (events[i].portev_events & POLLOUT) { flags |= TEVENT_FD_WRITE; } if (flags & TEVENT_FD_WRITE) { if (fde->flags & TEVENT_FD_WRITE) { mpx_fde = NULL; } if (mpx_fde && (mpx_fde->flags & TEVENT_FD_WRITE)) { fde = mpx_fde; mpx_fde = NULL; } if (mpx_fde) { /* Ensure we got the right fde. */ if ((flags & fde->flags) == 0) { fde = mpx_fde; mpx_fde = NULL; } } } /* * Make sure we only pass the flags * the handler is expecting. */ flags &= fde->flags; if (flags) { return tevent_common_invoke_fd_handler(fde, flags, NULL); } } return 0; } /* create a port_event_context structure. */ static int port_event_context_init(struct tevent_context *ev) { int ret; struct port_event_context *port_ev; /* * We might be called during tevent_re_initialise() * which means we need to free our old additional_data. */ TALLOC_FREE(ev->additional_data); port_ev = talloc_zero(ev, struct port_event_context); if (!port_ev) { return -1; } port_ev->ev = ev; port_ev->port_fd = -1; port_ev->pid = (pid_t)-1; ret = port_init_ctx(port_ev); if (ret != 0) { talloc_free(port_ev); return ret; } ev->additional_data = port_ev; return 0; } /* destroy an fd_event */ static int port_event_fd_destructor(struct tevent_fd *fde) { struct tevent_context *ev = fde->event_ctx; struct port_event_context *port_ev = NULL; struct tevent_fd *mpx_fde = NULL; int flags = (int)fde->flags; if (ev == NULL) { return tevent_common_fd_destructor(fde); } port_ev = talloc_get_type_abort(ev->additional_data, struct port_event_context); DLIST_REMOVE(ev->fd_events, fde); if (fde->additional_flags & PORT_ADDITIONAL_FD_FLAG_HAS_MPX) { mpx_fde = talloc_get_type_abort(fde->additional_data, struct tevent_fd); fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_HAS_MPX; mpx_fde->additional_flags &= ~PORT_ADDITIONAL_FD_FLAG_HAS_MPX; fde->additional_data = NULL; mpx_fde->additional_data = NULL; fde->additional_flags &= PORT_ADDITIONAL_FD_FLAG_HAS_ASSOCIATION; } (void)port_check_reopen(port_ev); if (mpx_fde != NULL) { (void)port_update_event(port_ev, mpx_fde); } fde->flags = 0; (void)port_update_event(port_ev, fde); fde->flags = flags; return tevent_common_fd_destructor(fde); } /* add a fd based event return NULL on failure (memory allocation error) */ static struct tevent_fd *port_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct port_event_context *port_ev = talloc_get_type_abort(ev->additional_data, struct port_event_context); struct tevent_fd *fde; fde = tevent_common_add_fd(ev, mem_ctx, fd, flags, handler, private_data, handler_name, location); if (!fde) { return NULL; } talloc_set_destructor(fde, port_event_fd_destructor); if (port_check_reopen(port_ev) != 0) { TALLOC_FREE(fde); return NULL; } if (port_update_event(port_ev, fde) != 0) { TALLOC_FREE(fde); return NULL; } return fde; } /* set the fd event flags */ static void 
port_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags) { struct tevent_context *ev; struct port_event_context *port_ev; if (fde->flags == flags) { return; } ev = fde->event_ctx; port_ev = talloc_get_type_abort(ev->additional_data, struct port_event_context); fde->flags = flags; (void)port_check_reopen(port_ev); (void)port_update_event(port_ev, fde); } /* do a single event loop using the events defined in ev */ static int port_event_loop_once(struct tevent_context *ev, const char *location) { struct port_event_context *port_ev = talloc_get_type(ev->additional_data, struct port_event_context); struct timeval tval; if (ev->signal_events && tevent_common_check_signal(ev)) { return 0; } if (ev->threaded_contexts != NULL) { tevent_common_threaded_activate_immediate(ev); } if (ev->immediate_events && tevent_common_loop_immediate(ev)) { return 0; } tval = tevent_common_loop_timer_delay(ev); if (tevent_timeval_is_zero(&tval)) { return 0; } if (port_check_reopen(port_ev) != 0) { errno = EINVAL; return -1; } return port_event_loop(port_ev, &tval); } static const struct tevent_ops port_event_ops = { .context_init = port_event_context_init, .add_fd = port_event_add_fd, .set_fd_close_fn = tevent_common_fd_set_close_fn, .get_fd_flags = tevent_common_fd_get_flags, .set_fd_flags = port_event_set_fd_flags, .add_timer = tevent_common_add_timer_v2, .schedule_immediate = tevent_common_schedule_immediate, .add_signal = tevent_common_add_signal, .loop_once = port_event_loop_once, .loop_wait = tevent_common_loop_wait, }; _PRIVATE_ bool tevent_port_init(void) { if (!tevent_register_backend("port", &port_event_ops)) { return false; } tevent_set_default_backend("port"); return true; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_queue.c0000660000000000000000000001652100000000000015715 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Infrastructure for async requests Copyright (C) Volker Lendecke 2008 Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" struct tevent_queue_entry { struct tevent_queue_entry *prev, *next; struct tevent_queue *queue; bool triggered; struct tevent_req *req; struct tevent_context *ev; tevent_queue_trigger_fn_t trigger; void *private_data; }; struct tevent_queue { const char *name; const char *location; bool running; struct tevent_immediate *immediate; size_t length; struct tevent_queue_entry *list; }; static void tevent_queue_immediate_trigger(struct tevent_context *ev, struct tevent_immediate *im, void *private_data); static int tevent_queue_entry_destructor(struct tevent_queue_entry *e) { struct tevent_queue *q = e->queue; if (!q) { return 0; } DLIST_REMOVE(q->list, e); q->length--; if (!q->running) { return 0; } if (!q->list) { return 0; } if (q->list->triggered) { return 0; } tevent_schedule_immediate(q->immediate, q->list->ev, tevent_queue_immediate_trigger, q); return 0; } static int tevent_queue_destructor(struct tevent_queue *q) { q->running = false; while (q->list) { struct tevent_queue_entry *e = q->list; talloc_free(e); } return 0; } struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx, const char *name, const char *location) { struct tevent_queue *queue; queue = talloc_zero(mem_ctx, struct tevent_queue); if (!queue) { return NULL; } queue->name = talloc_strdup(queue, name); if (!queue->name) { talloc_free(queue); return NULL; } queue->immediate = tevent_create_immediate(queue); if (!queue->immediate) { talloc_free(queue); return NULL; } queue->location = location; /* queue is running by default */ queue->running = true; talloc_set_destructor(queue, tevent_queue_destructor); return queue; } static void tevent_queue_immediate_trigger(struct tevent_context *ev, struct tevent_immediate *im, void *private_data) { struct tevent_queue *q = talloc_get_type_abort(private_data, struct tevent_queue); if (!q->running) { return; } if (!q->list) { return; } q->list->triggered = true; q->list->trigger(q->list->req, q->list->private_data); } static struct tevent_queue_entry *tevent_queue_add_internal( struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data, bool allow_direct) { struct tevent_queue_entry *e; e = talloc_zero(req, struct tevent_queue_entry); if (e == NULL) { return NULL; } e->queue = queue; e->req = req; e->ev = ev; e->trigger = trigger; e->private_data = private_data; /* * if there is no trigger, it is just a blocker */ if (trigger == NULL) { e->triggered = true; } if (queue->length > 0) { /* * if there are already entries in the * queue do not optimize. */ allow_direct = false; } if (req->async.fn != NULL) { /* * If the caller wants to optimize for the * empty queue case, call the trigger only * if there is no callback defined for the * request yet. */ allow_direct = false; } DLIST_ADD_END(queue->list, e); queue->length++; talloc_set_destructor(e, tevent_queue_entry_destructor); if (!queue->running) { return e; } if (queue->list->triggered) { return e; } /* * If allowed we directly call the trigger * avoiding possible delays caused by * an immediate event. 
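 * This path is only reachable via tevent_queue_add_optimize_empty():
 * the queue has to be running and previously empty, and the request
 * must not have a callback set yet.  A caller using it must cope
 * with the trigger running before the add call returns.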
*/ if (allow_direct) { queue->list->triggered = true; queue->list->trigger(queue->list->req, queue->list->private_data); return e; } tevent_schedule_immediate(queue->immediate, queue->list->ev, tevent_queue_immediate_trigger, queue); return e; } bool tevent_queue_add(struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data) { struct tevent_queue_entry *e; e = tevent_queue_add_internal(queue, ev, req, trigger, private_data, false); if (e == NULL) { return false; } return true; } struct tevent_queue_entry *tevent_queue_add_entry( struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data) { return tevent_queue_add_internal(queue, ev, req, trigger, private_data, false); } struct tevent_queue_entry *tevent_queue_add_optimize_empty( struct tevent_queue *queue, struct tevent_context *ev, struct tevent_req *req, tevent_queue_trigger_fn_t trigger, void *private_data) { return tevent_queue_add_internal(queue, ev, req, trigger, private_data, true); } void tevent_queue_entry_untrigger(struct tevent_queue_entry *entry) { if (entry->queue->running) { abort(); } if (entry->queue->list != entry) { abort(); } entry->triggered = false; } void tevent_queue_start(struct tevent_queue *queue) { if (queue->running) { /* already started */ return; } queue->running = true; if (!queue->list) { return; } if (queue->list->triggered) { return; } tevent_schedule_immediate(queue->immediate, queue->list->ev, tevent_queue_immediate_trigger, queue); } void tevent_queue_stop(struct tevent_queue *queue) { queue->running = false; } size_t tevent_queue_length(struct tevent_queue *queue) { return queue->length; } bool tevent_queue_running(struct tevent_queue *queue) { return queue->running; } struct tevent_queue_wait_state { uint8_t dummy; }; static void tevent_queue_wait_trigger(struct tevent_req *req, void *private_data); struct tevent_req *tevent_queue_wait_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct tevent_queue *queue) { struct tevent_req *req; struct tevent_queue_wait_state *state; bool ok; req = tevent_req_create(mem_ctx, &state, struct tevent_queue_wait_state); if (req == NULL) { return NULL; } ok = tevent_queue_add(queue, ev, req, tevent_queue_wait_trigger, NULL); if (!ok) { tevent_req_oom(req); return tevent_req_post(req, ev); } return req; } static void tevent_queue_wait_trigger(struct tevent_req *req, void *private_data) { tevent_req_done(req); } bool tevent_queue_wait_recv(struct tevent_req *req) { enum tevent_req_state state; uint64_t err; if (tevent_req_is_error(req, &state, &err)) { tevent_req_received(req); return false; } tevent_req_received(req); return true; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_req.c0000660000000000000000000003166300000000000015364 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Infrastructure for async requests Copyright (C) Volker Lendecke 2008 Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx) { return talloc_asprintf(mem_ctx, "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] " " state[%s (%p)] timer[%p] finish[%s]", req, req->internal.create_location, req->internal.state, (unsigned long long)req->internal.error, (unsigned long long)req->internal.error, req->internal.private_type, req->data, req->internal.timer, req->internal.finish_location ); } char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req) { if (req == NULL) { return talloc_strdup(mem_ctx, "tevent_req[NULL]"); } if (!req->private_print) { return tevent_req_default_print(req, mem_ctx); } return req->private_print(req, mem_ctx); } static int tevent_req_destructor(struct tevent_req *req); struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx, void *pdata, size_t data_size, const char *type, const char *location) { struct tevent_req *req; struct tevent_req *parent; void **ppdata = (void **)pdata; void *data; size_t payload; payload = sizeof(struct tevent_immediate) + data_size; if (payload < sizeof(struct tevent_immediate)) { /* overflow */ return NULL; } req = talloc_pooled_object( mem_ctx, struct tevent_req, 2, sizeof(struct tevent_immediate) + data_size); if (req == NULL) { return NULL; } *req = (struct tevent_req) { .internal = { .private_type = type, .create_location = location, .state = TEVENT_REQ_IN_PROGRESS, .trigger = tevent_create_immediate(req), }, }; data = talloc_zero_size(req, data_size); /* * No need to check for req->internal.trigger!=NULL or * data!=NULL, this can't fail: talloc_pooled_object has * already allocated sufficient memory. 
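 * The pool was created for exactly 2 child allocations: the
 * immediate trigger created above and the zeroed data blob of
 * data_size bytes.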
*/ talloc_set_name_const(data, type); req->data = data; talloc_set_destructor(req, tevent_req_destructor); parent = talloc_get_type(talloc_parent(mem_ctx), struct tevent_req); if ((parent != NULL) && (parent->internal.profile != NULL)) { bool ok = tevent_req_set_profile(req); if (!ok) { TALLOC_FREE(req); return NULL; } req->internal.profile->parent = parent->internal.profile; DLIST_ADD_END(parent->internal.profile->subprofiles, req->internal.profile); } *ppdata = data; return req; } static int tevent_req_destructor(struct tevent_req *req) { tevent_req_received(req); return 0; } void _tevent_req_notify_callback(struct tevent_req *req, const char *location) { req->internal.finish_location = location; if (req->internal.defer_callback_ev) { (void)tevent_req_post(req, req->internal.defer_callback_ev); req->internal.defer_callback_ev = NULL; return; } if (req->async.fn != NULL) { req->async.fn(req); } } static void tevent_req_cleanup(struct tevent_req *req) { if (req->private_cleanup.fn == NULL) { return; } if (req->private_cleanup.state >= req->internal.state) { /* * Don't call the cleanup_function multiple times for the same * state recursively */ return; } req->private_cleanup.state = req->internal.state; req->private_cleanup.fn(req, req->internal.state); } static void tevent_req_finish(struct tevent_req *req, enum tevent_req_state state, const char *location) { struct tevent_req_profile *p; /* * make sure we do not timeout after * the request was already finished */ TALLOC_FREE(req->internal.timer); req->internal.state = state; req->internal.finish_location = location; tevent_req_cleanup(req); p = req->internal.profile; if (p != NULL) { p->stop_location = location; p->stop_time = tevent_timeval_current(); p->state = state; p->user_error = req->internal.error; if (p->parent != NULL) { talloc_steal(p->parent, p); req->internal.profile = NULL; } } _tevent_req_notify_callback(req, location); } void _tevent_req_done(struct tevent_req *req, const char *location) { tevent_req_finish(req, TEVENT_REQ_DONE, location); } bool _tevent_req_error(struct tevent_req *req, uint64_t error, const char *location) { if (error == 0) { return false; } req->internal.error = error; tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location); return true; } void _tevent_req_oom(struct tevent_req *req, const char *location) { tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location); } bool _tevent_req_nomem(const void *p, struct tevent_req *req, const char *location) { if (p != NULL) { return false; } _tevent_req_oom(req, location); return true; } /** * @internal * * @brief Immediate event callback. * * @param[in] ev The event context to use. * * @param[in] im The immediate event. * * @param[in] priv The async request to be finished. 
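 *
 * The request state and finish location have already been recorded;
 * this handler re-runs tevent_req_finish() with those values so the
 * user callback fires from the event loop, as arranged by
 * tevent_req_post() and tevent_req_defer_callback().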
*/ static void tevent_req_trigger(struct tevent_context *ev, struct tevent_immediate *im, void *private_data) { struct tevent_req *req = talloc_get_type_abort(private_data, struct tevent_req); tevent_req_finish(req, req->internal.state, req->internal.finish_location); } struct tevent_req *tevent_req_post(struct tevent_req *req, struct tevent_context *ev) { tevent_schedule_immediate(req->internal.trigger, ev, tevent_req_trigger, req); return req; } void tevent_req_defer_callback(struct tevent_req *req, struct tevent_context *ev) { req->internal.defer_callback_ev = ev; } bool tevent_req_is_in_progress(struct tevent_req *req) { if (req->internal.state == TEVENT_REQ_IN_PROGRESS) { return true; } return false; } void tevent_req_received(struct tevent_req *req) { talloc_set_destructor(req, NULL); req->private_print = NULL; req->private_cancel = NULL; TALLOC_FREE(req->internal.trigger); TALLOC_FREE(req->internal.timer); req->internal.state = TEVENT_REQ_RECEIVED; tevent_req_cleanup(req); TALLOC_FREE(req->data); } bool tevent_req_poll(struct tevent_req *req, struct tevent_context *ev) { while (tevent_req_is_in_progress(req)) { int ret; ret = tevent_loop_once(ev); if (ret != 0) { return false; } } return true; } bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state, uint64_t *error) { if (req->internal.state == TEVENT_REQ_DONE) { return false; } if (req->internal.state == TEVENT_REQ_USER_ERROR) { *error = req->internal.error; } *state = req->internal.state; return true; } static void tevent_req_timedout(struct tevent_context *ev, struct tevent_timer *te, struct timeval now, void *private_data) { struct tevent_req *req = talloc_get_type_abort(private_data, struct tevent_req); TALLOC_FREE(req->internal.timer); tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__); } bool tevent_req_set_endtime(struct tevent_req *req, struct tevent_context *ev, struct timeval endtime) { TALLOC_FREE(req->internal.timer); req->internal.timer = tevent_add_timer(ev, req, endtime, tevent_req_timedout, req); if (tevent_req_nomem(req->internal.timer, req)) { return false; } return true; } void tevent_req_reset_endtime(struct tevent_req *req) { TALLOC_FREE(req->internal.timer); } void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt) { req->async.fn = fn; req->async.private_data = pvt; } void *_tevent_req_callback_data(struct tevent_req *req) { return req->async.private_data; } void *_tevent_req_data(struct tevent_req *req) { return req->data; } void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn) { req->private_print = fn; } void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn) { req->private_cancel = fn; } bool _tevent_req_cancel(struct tevent_req *req, const char *location) { if (req->private_cancel == NULL) { return false; } return req->private_cancel(req); } void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn) { req->private_cleanup.state = req->internal.state; req->private_cleanup.fn = fn; } static int tevent_req_profile_destructor(struct tevent_req_profile *p); bool tevent_req_set_profile(struct tevent_req *req) { struct tevent_req_profile *p; if (req->internal.profile != NULL) { tevent_req_error(req, EINVAL); return false; } p = tevent_req_profile_create(req); if (tevent_req_nomem(p, req)) { return false; } p->req_name = talloc_get_name(req->data); p->start_location = req->internal.create_location; p->start_time = tevent_timeval_current(); req->internal.profile = p; return true; } static 
int tevent_req_profile_destructor(struct tevent_req_profile *p) { if (p->parent != NULL) { DLIST_REMOVE(p->parent->subprofiles, p); p->parent = NULL; } while (p->subprofiles != NULL) { p->subprofiles->parent = NULL; DLIST_REMOVE(p->subprofiles, p->subprofiles); } return 0; } struct tevent_req_profile *tevent_req_move_profile(struct tevent_req *req, TALLOC_CTX *mem_ctx) { return talloc_move(mem_ctx, &req->internal.profile); } const struct tevent_req_profile *tevent_req_get_profile( struct tevent_req *req) { return req->internal.profile; } void tevent_req_profile_get_name(const struct tevent_req_profile *profile, const char **req_name) { if (req_name != NULL) { *req_name = profile->req_name; } } void tevent_req_profile_get_start(const struct tevent_req_profile *profile, const char **start_location, struct timeval *start_time) { if (start_location != NULL) { *start_location = profile->start_location; } if (start_time != NULL) { *start_time = profile->start_time; } } void tevent_req_profile_get_stop(const struct tevent_req_profile *profile, const char **stop_location, struct timeval *stop_time) { if (stop_location != NULL) { *stop_location = profile->stop_location; } if (stop_time != NULL) { *stop_time = profile->stop_time; } } void tevent_req_profile_get_status(const struct tevent_req_profile *profile, pid_t *pid, enum tevent_req_state *state, uint64_t *user_error) { if (pid != NULL) { *pid = profile->pid; } if (state != NULL) { *state = profile->state; } if (user_error != NULL) { *user_error = profile->user_error; } } const struct tevent_req_profile *tevent_req_profile_get_subprofiles( const struct tevent_req_profile *profile) { return profile->subprofiles; } const struct tevent_req_profile *tevent_req_profile_next( const struct tevent_req_profile *profile) { return profile->next; } struct tevent_req_profile *tevent_req_profile_create(TALLOC_CTX *mem_ctx) { struct tevent_req_profile *result; result = talloc_zero(mem_ctx, struct tevent_req_profile); if (result == NULL) { return NULL; } talloc_set_destructor(result, tevent_req_profile_destructor); return result; } bool tevent_req_profile_set_name(struct tevent_req_profile *profile, const char *req_name) { profile->req_name = talloc_strdup(profile, req_name); return (profile->req_name != NULL); } bool tevent_req_profile_set_start(struct tevent_req_profile *profile, const char *start_location, struct timeval start_time) { profile->start_time = start_time; profile->start_location = talloc_strdup(profile, start_location); return (profile->start_location != NULL); } bool tevent_req_profile_set_stop(struct tevent_req_profile *profile, const char *stop_location, struct timeval stop_time) { profile->stop_time = stop_time; profile->stop_location = talloc_strdup(profile, stop_location); return (profile->stop_location != NULL); } void tevent_req_profile_set_status(struct tevent_req_profile *profile, pid_t pid, enum tevent_req_state state, uint64_t user_error) { profile->pid = pid; profile->state = state; profile->user_error = user_error; } void tevent_req_profile_append_sub(struct tevent_req_profile *parent_profile, struct tevent_req_profile **sub_profile) { struct tevent_req_profile *sub; sub = talloc_move(parent_profile, sub_profile); sub->parent = parent_profile; DLIST_ADD_END(parent_profile->subprofiles, sub); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_signal.c0000660000000000000000000003265600000000000016055 0ustar00rootroot00000000000000/* Unix SMB/CIFS 
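/*
 * Illustrative sketch, not part of the tevent sources: walking the
 * profiling data exposed by the getters above, after a request was run
 * with tevent_req_set_profile() enabled on it.  print_profile() and
 * dump_req_profile() are hypothetical helper names.
 */
#include <stdio.h>
#include <sys/types.h>
#include <tevent.h>

static void print_profile(const struct tevent_req_profile *p, int depth)
{
	const struct tevent_req_profile *sub;
	const char *name = NULL;
	const char *start_loc = NULL;
	const char *stop_loc = NULL;
	pid_t pid;
	enum tevent_req_state state;
	uint64_t user_error;

	tevent_req_profile_get_name(p, &name);
	tevent_req_profile_get_start(p, &start_loc, NULL);
	tevent_req_profile_get_stop(p, &stop_loc, NULL);
	tevent_req_profile_get_status(p, &pid, &state, &user_error);

	printf("%*s%s pid=%d state=%d error=%llu start=%s stop=%s\n",
	       depth * 2, "", name != NULL ? name : "<unnamed>",
	       (int)pid, (int)state, (unsigned long long)user_error,
	       start_loc != NULL ? start_loc : "-",
	       stop_loc != NULL ? stop_loc : "-");

	for (sub = tevent_req_profile_get_subprofiles(p);
	     sub != NULL;
	     sub = tevent_req_profile_next(sub)) {
		print_profile(sub, depth + 1);
	}
}

/* Typically called once the request has completed. */
static void dump_req_profile(struct tevent_req *req)
{
	const struct tevent_req_profile *p = tevent_req_get_profile(req);

	if (p != NULL) {
		print_profile(p, 0);
	}
}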
implementation. common events code for signal events Copyright (C) Andrew Tridgell 2007 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/wait.h" #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" /* maximum number of SA_SIGINFO signals to hold in the queue. NB. This *MUST* be a power of 2, in order for the ring buffer wrap to work correctly. Thanks to Petr Vandrovec for this. */ #define TEVENT_SA_INFO_QUEUE_COUNT 256 size_t tevent_num_signals(void) { return TEVENT_NUM_SIGNALS; } size_t tevent_sa_info_queue_count(void) { return TEVENT_SA_INFO_QUEUE_COUNT; } struct tevent_sigcounter { uint32_t count; uint32_t seen; }; #if defined(HAVE___SYNC_FETCH_AND_ADD) #define TEVENT_SIG_INCREMENT(s) __sync_fetch_and_add(&((s).count), 1) #elif defined(HAVE_ATOMIC_ADD_32) #define TEVENT_SIG_INCREMENT(s) atomic_add_32(&((s).count), 1) #else #define TEVENT_SIG_INCREMENT(s) (s).count++ #endif #define TEVENT_SIG_SEEN(s, n) (s).seen += (n) #define TEVENT_SIG_PENDING(s) ((s).seen != (s).count) struct tevent_common_signal_list { struct tevent_common_signal_list *prev, *next; struct tevent_signal *se; }; /* the poor design of signals means that this table must be static global */ static struct tevent_sig_state { struct tevent_common_signal_list *sig_handlers[TEVENT_NUM_SIGNALS+1]; struct sigaction *oldact[TEVENT_NUM_SIGNALS+1]; struct tevent_sigcounter signal_count[TEVENT_NUM_SIGNALS+1]; struct tevent_sigcounter got_signal; #ifdef SA_SIGINFO /* with SA_SIGINFO we get quite a lot of info per signal */ siginfo_t *sig_info[TEVENT_NUM_SIGNALS+1]; struct tevent_sigcounter sig_blocked[TEVENT_NUM_SIGNALS+1]; #endif } *sig_state; /* return number of sigcounter events not processed yet */ static uint32_t tevent_sig_count(struct tevent_sigcounter s) { return s.count - s.seen; } /* signal handler - redirects to registered signals */ static void tevent_common_signal_handler(int signum) { struct tevent_common_signal_list *sl; struct tevent_context *ev = NULL; int saved_errno = errno; TEVENT_SIG_INCREMENT(sig_state->signal_count[signum]); TEVENT_SIG_INCREMENT(sig_state->got_signal); /* Write to each unique event context. */ for (sl = sig_state->sig_handlers[signum]; sl; sl = sl->next) { if (sl->se->event_ctx && sl->se->event_ctx != ev) { ev = sl->se->event_ctx; tevent_common_wakeup(ev); } } errno = saved_errno; } #ifdef SA_SIGINFO /* signal handler with SA_SIGINFO - redirects to registered signals */ static void tevent_common_signal_handler_info(int signum, siginfo_t *info, void *uctx) { uint32_t count = tevent_sig_count(sig_state->signal_count[signum]); /* sig_state->signal_count[signum].seen % TEVENT_SA_INFO_QUEUE_COUNT * is the base of the unprocessed signals in the ringbuffer. 
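 * (These counters are free-running uint32_t values that eventually wrap
 * around at 2^32.  The "% TEVENT_SA_INFO_QUEUE_COUNT" indexing only stays
 * consistent across that wrap-around because the queue size divides 2^32,
 * i.e. because it is a power of 2 -- which is why the definition above
 * insists on that.)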
*/ uint32_t ofs = (sig_state->signal_count[signum].seen + count) % TEVENT_SA_INFO_QUEUE_COUNT; sig_state->sig_info[signum][ofs] = *info; tevent_common_signal_handler(signum); /* handle SA_SIGINFO */ if (count+1 == TEVENT_SA_INFO_QUEUE_COUNT) { /* we've filled the info array - block this signal until these ones are delivered */ #ifdef HAVE_UCONTEXT_T /* * This is the only way for this to work. * By default signum is blocked inside this * signal handler using a temporary mask, * but what we really need to do now is * block it in the callers mask, so it * stays blocked when the temporary signal * handler mask is replaced when we return * from here. The callers mask can be found * in the ucontext_t passed in as the * void *uctx argument. */ ucontext_t *ucp = (ucontext_t *)uctx; sigaddset(&ucp->uc_sigmask, signum); #else /* * WARNING !!! WARNING !!!! * * This code doesn't work. * By default signum is blocked inside this * signal handler, but calling sigprocmask * modifies the temporary signal mask being * used *inside* this handler, which will be * replaced by the callers signal mask once * we return from here. See Samba * bug #9550 for details. */ sigset_t set; sigemptyset(&set); sigaddset(&set, signum); sigprocmask(SIG_BLOCK, &set, NULL); #endif TEVENT_SIG_INCREMENT(sig_state->sig_blocked[signum]); } } #endif static int tevent_common_signal_list_destructor(struct tevent_common_signal_list *sl) { if (sig_state->sig_handlers[sl->se->signum]) { DLIST_REMOVE(sig_state->sig_handlers[sl->se->signum], sl); } return 0; } /* destroy a signal event */ static int tevent_signal_destructor(struct tevent_signal *se) { if (se->destroyed) { tevent_common_check_double_free(se, "tevent_signal double free"); goto done; } se->destroyed = true; TALLOC_FREE(se->additional_data); if (se->event_ctx != NULL) { tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_DETACH); DLIST_REMOVE(se->event_ctx->signal_events, se); } if (sig_state->sig_handlers[se->signum] == NULL) { /* restore old handler, if any */ if (sig_state->oldact[se->signum]) { sigaction(se->signum, sig_state->oldact[se->signum], NULL); TALLOC_FREE(sig_state->oldact[se->signum]); } #ifdef SA_SIGINFO if (se->sa_flags & SA_SIGINFO) { if (sig_state->sig_info[se->signum]) { TALLOC_FREE(sig_state->sig_info[se->signum]); } } #endif } se->event_ctx = NULL; done: if (se->busy) { return -1; } se->wrapper = NULL; return 0; } /* add a signal event return NULL on failure (memory allocation error) */ struct tevent_signal *tevent_common_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct tevent_signal *se; struct tevent_common_signal_list *sl; sigset_t set, oldset; int ret; ret = tevent_common_wakeup_init(ev); if (ret != 0) { errno = ret; return NULL; } if (signum >= TEVENT_NUM_SIGNALS) { errno = EINVAL; return NULL; } /* the sig_state needs to be on a global context as it can last across multiple event contexts */ if (sig_state == NULL) { sig_state = talloc_zero(NULL, struct tevent_sig_state); if (sig_state == NULL) { return NULL; } } se = talloc_zero(mem_ctx?mem_ctx:ev, struct tevent_signal); if (se == NULL) return NULL; sl = talloc_zero(se, struct tevent_common_signal_list); if (!sl) { talloc_free(se); return NULL; } sl->se = se; *se = (struct tevent_signal) { .event_ctx = ev, .signum = signum, .sa_flags = sa_flags, .handler = handler, .private_data = private_data, .handler_name = handler_name, .location = 
location, .additional_data= sl, }; /* Ensure, no matter the destruction order, that we always have a handle on the global sig_state */ if (!talloc_reference(se, sig_state)) { talloc_free(se); return NULL; } /* only install a signal handler if not already installed */ if (sig_state->sig_handlers[signum] == NULL) { struct sigaction act; ZERO_STRUCT(act); act.sa_handler = tevent_common_signal_handler; act.sa_flags = sa_flags; #ifdef SA_SIGINFO if (sa_flags & SA_SIGINFO) { act.sa_handler = NULL; act.sa_sigaction = tevent_common_signal_handler_info; if (sig_state->sig_info[signum] == NULL) { sig_state->sig_info[signum] = talloc_zero_array(sig_state, siginfo_t, TEVENT_SA_INFO_QUEUE_COUNT); if (sig_state->sig_info[signum] == NULL) { talloc_free(se); return NULL; } } } #endif sig_state->oldact[signum] = talloc_zero(sig_state, struct sigaction); if (sig_state->oldact[signum] == NULL) { talloc_free(se); return NULL; } if (sigaction(signum, &act, sig_state->oldact[signum]) == -1) { talloc_free(sig_state->oldact[signum]); sig_state->oldact[signum] = NULL; talloc_free(se); return NULL; } } DLIST_ADD(se->event_ctx->signal_events, se); /* Make sure the signal doesn't come in while we're mangling list. */ sigemptyset(&set); sigaddset(&set, signum); sigprocmask(SIG_BLOCK, &set, &oldset); tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_ATTACH); DLIST_ADD(sig_state->sig_handlers[signum], sl); sigprocmask(SIG_SETMASK, &oldset, NULL); talloc_set_destructor(se, tevent_signal_destructor); talloc_set_destructor(sl, tevent_common_signal_list_destructor); return se; } int tevent_common_invoke_signal_handler(struct tevent_signal *se, int signum, int count, void *siginfo, bool *removed) { struct tevent_context *handler_ev = se->event_ctx; bool remove = false; if (removed != NULL) { *removed = false; } if (se->event_ctx == NULL) { return 0; } se->busy = true; if (se->wrapper != NULL) { handler_ev = se->wrapper->wrap_ev; tevent_wrapper_push_use_internal(handler_ev, se->wrapper); se->wrapper->ops->before_signal_handler( se->wrapper->wrap_ev, se->wrapper->private_state, se->wrapper->main_ev, se, signum, count, siginfo, se->handler_name, se->location); } tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_BEFORE_HANDLER); se->handler(handler_ev, se, signum, count, siginfo, se->private_data); if (se->wrapper != NULL) { se->wrapper->ops->after_signal_handler( se->wrapper->wrap_ev, se->wrapper->private_state, se->wrapper->main_ev, se, signum, count, siginfo, se->handler_name, se->location); tevent_wrapper_pop_use_internal(handler_ev, se->wrapper); } se->busy = false; #ifdef SA_RESETHAND if (se->sa_flags & SA_RESETHAND) { remove = true; } #endif if (se->destroyed) { talloc_set_destructor(se, NULL); remove = true; } if (remove) { TALLOC_FREE(se); if (removed != NULL) { *removed = true; } } return 0; } /* check if a signal is pending return != 0 if a signal was pending */ int tevent_common_check_signal(struct tevent_context *ev) { int i; if (!sig_state || !TEVENT_SIG_PENDING(sig_state->got_signal)) { return 0; } for (i=0;isignal_count[i]; uint32_t count = tevent_sig_count(counter); int ret; #ifdef SA_SIGINFO /* Ensure we null out any stored siginfo_t entries * after processing for debugging purposes. 
*/ bool clear_processed_siginfo = false; #endif if (count == 0) { continue; } for (sl=sig_state->sig_handlers[i];sl;sl=next) { struct tevent_signal *se = sl->se; next = sl->next; #ifdef SA_SIGINFO if (se->sa_flags & SA_SIGINFO) { uint32_t j; clear_processed_siginfo = true; for (j=0;jsignal_count[i].seen * % TEVENT_SA_INFO_QUEUE_COUNT is * the base position of the unprocessed * signals in the ringbuffer. */ uint32_t ofs = (counter.seen + j) % TEVENT_SA_INFO_QUEUE_COUNT; bool removed = false; ret = tevent_common_invoke_signal_handler( se, i, 1, (void*)&sig_state->sig_info[i][ofs], &removed); if (ret != 0) { tevent_abort(ev, "tevent_common_invoke_signal_handler() failed"); } if (removed) { break; } } continue; } #endif ret = tevent_common_invoke_signal_handler(se, i, count, NULL, NULL); if (ret != 0) { tevent_abort(ev, "tevent_common_invoke_signal_handler() failed"); } } #ifdef SA_SIGINFO if (clear_processed_siginfo && sig_state->sig_info[i] != NULL) { uint32_t j; for (j=0;jsig_info[i][ofs], '\0', sizeof(siginfo_t)); } } #endif TEVENT_SIG_SEEN(sig_state->signal_count[i], count); TEVENT_SIG_SEEN(sig_state->got_signal, count); #ifdef SA_SIGINFO if (TEVENT_SIG_PENDING(sig_state->sig_blocked[i])) { /* We'd filled the queue, unblock the signal now the queue is empty again. Note we MUST do this after the TEVENT_SIG_SEEN(sig_state->signal_count[i], count) call to prevent a new signal running out of room in the sig_state->sig_info[i][] ring buffer. */ sigset_t set; sigemptyset(&set); sigaddset(&set, i); TEVENT_SIG_SEEN(sig_state->sig_blocked[i], tevent_sig_count(sig_state->sig_blocked[i])); sigprocmask(SIG_UNBLOCK, &set, NULL); } #endif } return 1; } void tevent_cleanup_pending_signal_handlers(struct tevent_signal *se) { tevent_signal_destructor(se); talloc_set_destructor(se, NULL); return; } void tevent_signal_set_tag(struct tevent_signal *se, uint64_t tag) { if (se == NULL) { return; } se->tag = tag; } uint64_t tevent_signal_get_tag(const struct tevent_signal *se) { if (se == NULL) { return 0; } return se->tag; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_standard.c0000660000000000000000000001376700000000000016402 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. main select loop and event handling Copyright (C) Stefan Metzmacher 2013 Copyright (C) Jeremy Allison 2013 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
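/*
 * Illustrative sketch, not part of the tevent sources: registering a
 * signal event through the public tevent_add_signal() wrapper around
 * tevent_common_add_signal() above.  handle_sigusr1 is a made-up name.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

/* tevent_signal_handler_t: "count" is the number of deliveries coalesced
 * into this call; "siginfo" is only non-NULL for SA_SIGINFO registrations. */
static void handle_sigusr1(struct tevent_context *ev,
			   struct tevent_signal *se,
			   int signum, int count,
			   void *siginfo, void *private_data)
{
	int *seen = private_data;

	*seen += count;
	printf("got SIGUSR1 x%d\n", count);
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_signal *se;
	int seen = 0;

	se = tevent_add_signal(ev, mem_ctx, SIGUSR1, 0,
			       handle_sigusr1, &seen);
	if (se == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	kill(getpid(), SIGUSR1);
	while (seen == 0) {
		tevent_loop_once(ev);	/* dispatches the pending signal */
	}

	talloc_free(mem_ctx);
	return 0;
}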
*/ /* This is SAMBA's default event loop code - we try to use epoll if configure detected support for it otherwise we use poll() - if epoll is broken on the system or the kernel doesn't support it at runtime we fallback to poll() */ #include "replace.h" #include "tevent.h" #include "tevent_util.h" #include "tevent_internal.h" struct std_event_glue { const struct tevent_ops *epoll_ops; const struct tevent_ops *poll_ops; struct tevent_ops *glue_ops; bool fallback_replay; }; static int std_event_context_init(struct tevent_context *ev); static const struct tevent_ops std_event_ops = { .context_init = std_event_context_init, }; /* If this function gets called. epoll failed at runtime. Move us to using poll instead. If we return false here, caller should abort(). */ #ifdef HAVE_EPOLL static bool std_fallback_to_poll(struct tevent_context *ev, bool replay) { void *glue_ptr = talloc_parent(ev->ops); struct std_event_glue *glue = talloc_get_type_abort(glue_ptr, struct std_event_glue); int ret; struct tevent_fd *fde; glue->fallback_replay = replay; /* First switch all the ops to poll. */ glue->epoll_ops = NULL; /* * Set custom_ops the same as poll. */ *glue->glue_ops = *glue->poll_ops; glue->glue_ops->context_init = std_event_context_init; /* Next initialize the poll backend. */ ret = glue->poll_ops->context_init(ev); if (ret != 0) { return false; } /* * Now we have to change all the existing file descriptor * events from the epoll backend to the poll backend. */ for (fde = ev->fd_events; fde; fde = fde->next) { bool ok; /* Re-add this event as a poll backend event. */ ok = tevent_poll_event_add_fd_internal(ev, fde); if (!ok) { return false; } } return true; } #endif static int std_event_loop_once(struct tevent_context *ev, const char *location) { void *glue_ptr = talloc_parent(ev->ops); struct std_event_glue *glue = talloc_get_type_abort(glue_ptr, struct std_event_glue); int ret; ret = glue->epoll_ops->loop_once(ev, location); /* * If the above hasn't panicked due to an epoll interface failure, * std_fallback_to_poll() wasn't called, and hasn't cleared epoll_ops to * signify fallback to poll_ops. */ if (glue->epoll_ops != NULL) { /* No fallback */ return ret; } if (!glue->fallback_replay) { /* * The problem happened while modifying an event. * An event handler was triggered in this case * and there is no need to call loop_once() again. */ return ret; } return glue->poll_ops->loop_once(ev, location); } static int std_event_loop_wait(struct tevent_context *ev, const char *location) { void *glue_ptr = talloc_parent(ev->ops); struct std_event_glue *glue = talloc_get_type_abort(glue_ptr, struct std_event_glue); int ret; ret = glue->epoll_ops->loop_wait(ev, location); /* * If the above hasn't panicked due to an epoll interface failure, * std_fallback_to_poll() wasn't called, and hasn't cleared epoll_ops to * signify fallback to poll_ops. */ if (glue->epoll_ops != NULL) { /* No fallback */ return ret; } return glue->poll_ops->loop_wait(ev, location); } /* Initialize the epoll backend and allow it to call a switch function if epoll fails at runtime. */ static int std_event_context_init(struct tevent_context *ev) { struct std_event_glue *glue; int ret; /* * If this is the first initialization * we need to set up the allocated ops * pointers. 
*/ if (ev->ops == &std_event_ops) { glue = talloc_zero(ev, struct std_event_glue); if (glue == NULL) { return -1; } glue->epoll_ops = tevent_find_ops_byname("epoll"); glue->poll_ops = tevent_find_ops_byname("poll"); if (glue->poll_ops == NULL) { return -1; } /* * Allocate space for our custom ops. * Allocate as a child of our epoll_ops pointer * so we can easily get to it using talloc_parent. */ glue->glue_ops = talloc_zero(glue, struct tevent_ops); if (glue->glue_ops == NULL) { talloc_free(glue); return -1; } ev->ops = glue->glue_ops; } else { void *glue_ptr = talloc_parent(ev->ops); glue = talloc_get_type_abort(glue_ptr, struct std_event_glue); } if (glue->epoll_ops != NULL) { /* * Set custom_ops the same as epoll, * except re-init using std_event_context_init() * and use std_event_loop_once() to add the * ability to fallback to a poll backend on * epoll runtime error. */ *glue->glue_ops = *glue->epoll_ops; glue->glue_ops->context_init = std_event_context_init; glue->glue_ops->loop_once = std_event_loop_once; glue->glue_ops->loop_wait = std_event_loop_wait; ret = glue->epoll_ops->context_init(ev); if (ret == -1) { goto fallback; } #ifdef HAVE_EPOLL tevent_epoll_set_panic_fallback(ev, std_fallback_to_poll); #endif return ret; } fallback: glue->epoll_ops = NULL; /* * Set custom_ops the same as poll. */ *glue->glue_ops = *glue->poll_ops; glue->glue_ops->context_init = std_event_context_init; return glue->poll_ops->context_init(ev); } _PRIVATE_ bool tevent_standard_init(void) { return tevent_register_backend("standard", &std_event_ops); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_threads.c0000660000000000000000000003257600000000000016233 0ustar00rootroot00000000000000/* tevent event library. Copyright (C) Jeremy Allison 2015 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "talloc.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" #ifdef HAVE_PTHREAD #include "system/threads.h" struct tevent_immediate_list { struct tevent_immediate_list *next, *prev; tevent_immediate_handler_t handler; struct tevent_immediate *im; void *private_ptr; }; struct tevent_thread_proxy { pthread_mutex_t mutex; struct tevent_context *dest_ev_ctx; int read_fd; int write_fd; struct tevent_fd *pipe_read_fde; /* Pending events list. */ struct tevent_immediate_list *im_list; /* Completed events list. 
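/*
 * Illustrative sketch, not part of the tevent sources: how an application
 * selects the backend that the "standard" glue above multiplexes.  Assumes
 * the list returned by tevent_backend_list() is NULL-terminated.
 */
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	const char **backends = tevent_backend_list(mem_ctx);
	struct tevent_context *ev;
	size_t i;

	for (i = 0; backends != NULL && backends[i] != NULL; i++) {
		printf("registered backend: %s\n", backends[i]);
	}

	/* "standard" is the glue above: epoll when available, poll otherwise */
	ev = tevent_context_init_byname(mem_ctx, "standard");
	if (ev == NULL) {
		ev = tevent_context_init(mem_ctx);	/* library default */
	}

	talloc_free(mem_ctx);
	return (ev != NULL) ? 0 : 1;
}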
*/ struct tevent_immediate_list *tofree_im_list; struct tevent_immediate *free_im; }; static void free_im_list(struct tevent_immediate_list **pp_list_head) { struct tevent_immediate_list *im_entry = NULL; struct tevent_immediate_list *im_next = NULL; for (im_entry = *pp_list_head; im_entry; im_entry = im_next) { im_next = im_entry->next; DLIST_REMOVE(*pp_list_head, im_entry); TALLOC_FREE(im_entry); } } static void free_list_handler(struct tevent_context *ev, struct tevent_immediate *im, void *private_ptr) { struct tevent_thread_proxy *tp = talloc_get_type_abort(private_ptr, struct tevent_thread_proxy); int ret; ret = pthread_mutex_lock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return; } free_im_list(&tp->tofree_im_list); ret = pthread_mutex_unlock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return; } } static void schedule_immediate_functions(struct tevent_thread_proxy *tp) { struct tevent_immediate_list *im_entry = NULL; struct tevent_immediate_list *im_next = NULL; for (im_entry = tp->im_list; im_entry; im_entry = im_next) { im_next = im_entry->next; DLIST_REMOVE(tp->im_list, im_entry); tevent_schedule_immediate(im_entry->im, tp->dest_ev_ctx, im_entry->handler, im_entry->private_ptr); /* Move from pending list to free list. */ DLIST_ADD(tp->tofree_im_list, im_entry); } if (tp->tofree_im_list != NULL) { /* * Once the current immediate events * are processed, we need to reschedule * ourselves to free them. This works * as tevent_schedule_immediate() * always adds events to the *END* of * the immediate events list. */ tevent_schedule_immediate(tp->free_im, tp->dest_ev_ctx, free_list_handler, tp); } } static void pipe_read_handler(struct tevent_context *ev, struct tevent_fd *fde, uint16_t flags, void *private_ptr) { struct tevent_thread_proxy *tp = talloc_get_type_abort(private_ptr, struct tevent_thread_proxy); ssize_t len = 64; int ret; ret = pthread_mutex_lock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return; } /* * Clear out all data in the pipe. We * don't really care if this returns -1. */ while (len == 64) { char buf[64]; len = read(tp->read_fd, buf, 64); }; schedule_immediate_functions(tp); ret = pthread_mutex_unlock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return; } } static int tevent_thread_proxy_destructor(struct tevent_thread_proxy *tp) { int ret; ret = pthread_mutex_lock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return 0; } TALLOC_FREE(tp->pipe_read_fde); if (tp->read_fd != -1) { (void)close(tp->read_fd); tp->read_fd = -1; } if (tp->write_fd != -1) { (void)close(tp->write_fd); tp->write_fd = -1; } /* Hmmm. It's probably an error if we get here with any non-NULL immediate entries.. */ free_im_list(&tp->im_list); free_im_list(&tp->tofree_im_list); TALLOC_FREE(tp->free_im); ret = pthread_mutex_unlock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return 0; } ret = pthread_mutex_destroy(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return 0; } return 0; } /* * Create a struct that can be passed to other threads * to allow them to signal the struct tevent_context * * passed in. 
*/ struct tevent_thread_proxy *tevent_thread_proxy_create( struct tevent_context *dest_ev_ctx) { int ret; int pipefds[2]; struct tevent_thread_proxy *tp; if (dest_ev_ctx->wrapper.glue != NULL) { /* * stacking of wrappers is not supported */ tevent_debug(dest_ev_ctx->wrapper.glue->main_ev, TEVENT_DEBUG_FATAL, "%s() not allowed on a wrapper context\n", __func__); errno = EINVAL; return NULL; } tp = talloc_zero(dest_ev_ctx, struct tevent_thread_proxy); if (tp == NULL) { return NULL; } ret = pthread_mutex_init(&tp->mutex, NULL); if (ret != 0) { goto fail; } tp->dest_ev_ctx = dest_ev_ctx; tp->read_fd = -1; tp->write_fd = -1; talloc_set_destructor(tp, tevent_thread_proxy_destructor); ret = pipe(pipefds); if (ret == -1) { goto fail; } tp->read_fd = pipefds[0]; tp->write_fd = pipefds[1]; ret = ev_set_blocking(pipefds[0], false); if (ret != 0) { goto fail; } ret = ev_set_blocking(pipefds[1], false); if (ret != 0) { goto fail; } if (!ev_set_close_on_exec(pipefds[0])) { goto fail; } if (!ev_set_close_on_exec(pipefds[1])) { goto fail; } tp->pipe_read_fde = tevent_add_fd(dest_ev_ctx, tp, tp->read_fd, TEVENT_FD_READ, pipe_read_handler, tp); if (tp->pipe_read_fde == NULL) { goto fail; } /* * Create an immediate event to free * completed lists. */ tp->free_im = tevent_create_immediate(tp); if (tp->free_im == NULL) { goto fail; } return tp; fail: TALLOC_FREE(tp); return NULL; } /* * This function schedules an immediate event to be called with argument * *pp_private in the thread context of dest_ev_ctx. Caller doesn't * wait for activation to take place, this is simply fire-and-forget. * * pp_im must be a pointer to an immediate event talloced on * a context owned by the calling thread, or the NULL context. * Ownership of *pp_im will be transfered to the tevent library. * * pp_private can be null, or contents of *pp_private must be * talloc'ed memory on a context owned by the calling thread * or the NULL context. If non-null, ownership of *pp_private will * be transfered to the tevent library. * * If you want to return a message, have the destination use the * same function call to send back to the caller. */ void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp, struct tevent_immediate **pp_im, tevent_immediate_handler_t handler, void *pp_private_data) { struct tevent_immediate_list *im_entry; int ret; char c; ssize_t written; ret = pthread_mutex_lock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. */ return; } if (tp->write_fd == -1) { /* In the process of being destroyed. Ignore. */ goto end; } /* Create a new immediate_list entry. MUST BE ON THE NULL CONTEXT */ im_entry = talloc_zero(NULL, struct tevent_immediate_list); if (im_entry == NULL) { goto end; } im_entry->handler = handler; im_entry->im = talloc_move(im_entry, pp_im); if (pp_private_data != NULL) { void **pptr = (void **)pp_private_data; im_entry->private_ptr = talloc_move(im_entry, pptr); } DLIST_ADD(tp->im_list, im_entry); /* And notify the dest_ev_ctx to wake up. */ c = '\0'; do { written = write(tp->write_fd, &c, 1); } while (written == -1 && errno == EINTR); end: ret = pthread_mutex_unlock(&tp->mutex); if (ret != 0) { abort(); /* Notreached. 
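/*
 * Illustrative sketch, not part of the tevent sources: the calling pattern
 * described in the comment above for tevent_thread_proxy_create() and
 * tevent_thread_proxy_schedule().  worker_main(), on_worker_done() and
 * worker_done are made-up names; error checking is trimmed.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

static bool worker_done;

/* Runs in the main thread, inside the destination event loop. */
static void on_worker_done(struct tevent_context *ev,
			   struct tevent_immediate *im,
			   void *private_data)
{
	char *msg = (char *)private_data;	/* still owned by the proxy */

	printf("worker says: %s\n", msg);
	worker_done = true;
}

static void *worker_main(void *arg)
{
	struct tevent_thread_proxy *proxy = arg;

	/* Both objects live on the NULL context; ownership moves to the
	 * tevent library inside tevent_thread_proxy_schedule(). */
	struct tevent_immediate *im = tevent_create_immediate(NULL);
	char *msg = talloc_strdup(NULL, "hello from the worker");

	tevent_thread_proxy_schedule(proxy, &im, on_worker_done, &msg);
	return NULL;
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_thread_proxy *proxy = tevent_thread_proxy_create(ev);
	pthread_t t;

	pthread_create(&t, NULL, worker_main, proxy);
	while (!worker_done) {
		tevent_loop_once(ev);	/* pipe wakes us, immediate runs */
	}
	pthread_join(t, NULL);

	talloc_free(mem_ctx);
	return 0;
}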
*/ } } #else /* !HAVE_PTHREAD */ struct tevent_thread_proxy *tevent_thread_proxy_create( struct tevent_context *dest_ev_ctx) { errno = ENOSYS; return NULL; } void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp, struct tevent_immediate **pp_im, tevent_immediate_handler_t handler, void *pp_private_data) { ; } #endif static int tevent_threaded_context_destructor( struct tevent_threaded_context *tctx) { struct tevent_context *main_ev = tevent_wrapper_main_ev(tctx->event_ctx); int ret; if (main_ev != NULL) { DLIST_REMOVE(main_ev->threaded_contexts, tctx); } /* * We have to coordinate with _tevent_threaded_schedule_immediate's * unlock of the event_ctx_mutex. We're in the main thread here, * and we can be scheduled before the helper thread finalizes its * call _tevent_threaded_schedule_immediate. This means we would * pthreadpool_destroy a locked mutex, which is illegal. */ ret = pthread_mutex_lock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } ret = pthread_mutex_destroy(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } return 0; } struct tevent_threaded_context *tevent_threaded_context_create( TALLOC_CTX *mem_ctx, struct tevent_context *ev) { #ifdef HAVE_PTHREAD struct tevent_context *main_ev = tevent_wrapper_main_ev(ev); struct tevent_threaded_context *tctx; int ret; ret = tevent_common_wakeup_init(main_ev); if (ret != 0) { errno = ret; return NULL; } tctx = talloc(mem_ctx, struct tevent_threaded_context); if (tctx == NULL) { return NULL; } tctx->event_ctx = ev; ret = pthread_mutex_init(&tctx->event_ctx_mutex, NULL); if (ret != 0) { TALLOC_FREE(tctx); return NULL; } DLIST_ADD(main_ev->threaded_contexts, tctx); talloc_set_destructor(tctx, tevent_threaded_context_destructor); return tctx; #else errno = ENOSYS; return NULL; #endif } static int tevent_threaded_schedule_immediate_destructor(struct tevent_immediate *im) { if (im->event_ctx != NULL) { abort(); } return 0; } void _tevent_threaded_schedule_immediate(struct tevent_threaded_context *tctx, struct tevent_immediate *im, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location) { #ifdef HAVE_PTHREAD const char *create_location = im->create_location; struct tevent_context *main_ev = NULL; struct tevent_wrapper_glue *glue = NULL; int ret, wakeup_fd; ret = pthread_mutex_lock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } if (tctx->event_ctx == NULL) { /* * Our event context is already gone. */ ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } return; } glue = tctx->event_ctx->wrapper.glue; if ((im->event_ctx != NULL) || (handler == NULL)) { abort(); } if (im->destroyed) { abort(); } if (im->busy) { abort(); } main_ev = tevent_wrapper_main_ev(tctx->event_ctx); *im = (struct tevent_immediate) { .event_ctx = tctx->event_ctx, .wrapper = glue, .handler = handler, .private_data = private_data, .handler_name = handler_name, .create_location = create_location, .schedule_location = location, }; /* * Make sure the event won't be destroyed while * it's part of the ev->scheduled_immediates list. * _tevent_schedule_immediate() will reset the destructor * in tevent_common_threaded_activate_immediate(). 
*/ talloc_set_destructor(im, tevent_threaded_schedule_immediate_destructor); ret = pthread_mutex_lock(&main_ev->scheduled_mutex); if (ret != 0) { abort(); } DLIST_ADD_END(main_ev->scheduled_immediates, im); wakeup_fd = main_ev->wakeup_fd; ret = pthread_mutex_unlock(&main_ev->scheduled_mutex); if (ret != 0) { abort(); } ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } /* * We might want to wake up the main thread under the lock. We * had a slightly similar situation in pthreadpool, changed * with 1c4284c7395f23. This is not exactly the same, as the * wakeup is only a last-resort thing in case the main thread * is sleeping. Doing the wakeup under the lock can easily * lead to a contended mutex, which is much more expensive * than a noncontended one. So I'd opt for the lower footprint * initially. Maybe we have to change that later. */ tevent_common_wakeup_fd(wakeup_fd); #else /* * tevent_threaded_context_create() returned NULL with ENOSYS... */ abort(); #endif } void tevent_common_threaded_activate_immediate(struct tevent_context *ev) { #ifdef HAVE_PTHREAD int ret; ret = pthread_mutex_lock(&ev->scheduled_mutex); if (ret != 0) { abort(); } while (ev->scheduled_immediates != NULL) { struct tevent_immediate *im = ev->scheduled_immediates; struct tevent_immediate copy = *im; DLIST_REMOVE(ev->scheduled_immediates, im); tevent_debug(ev, TEVENT_DEBUG_TRACE, "Schedule immediate event \"%s\": %p from thread into main\n", im->handler_name, im); im->handler_name = NULL; _tevent_schedule_immediate(im, ev, copy.handler, copy.private_data, copy.handler_name, copy.schedule_location); } ret = pthread_mutex_unlock(&ev->scheduled_mutex); if (ret != 0) { abort(); } #else /* * tevent_threaded_context_create() returned NULL with ENOSYS... */ abort(); #endif } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_timed.c0000660000000000000000000002666500000000000015705 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. common events code for timed events Copyright (C) Andrew Tridgell 2003-2006 Copyright (C) Stefan Metzmacher 2005-2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/time.h" #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" /** compare two timeval structures. 
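/*
 * Illustrative sketch, not part of the tevent sources: the lower-level
 * helper-thread API implemented above -- tevent_threaded_context_create()
 * in the main thread plus tevent_threaded_schedule_immediate() from the
 * helper.  All example_* names are made up; error checking is trimmed.
 */
#include <pthread.h>
#include <stdbool.h>
#include <talloc.h>
#include <tevent.h>

static bool example_done;

static void example_im_handler(struct tevent_context *ev,
			       struct tevent_immediate *im,
			       void *private_data)
{
	example_done = true;
}

struct example_thread_state {
	struct tevent_threaded_context *tctx;
	struct tevent_immediate *im;
};

static void *example_thread(void *arg)
{
	struct example_thread_state *ts = arg;

	/* Hand the pre-allocated immediate over to the main event loop;
	 * the immediate must not already be scheduled. */
	tevent_threaded_schedule_immediate(ts->tctx, ts->im,
					   example_im_handler, NULL);
	return NULL;
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct example_thread_state ts = {
		.tctx = tevent_threaded_context_create(mem_ctx, ev),
		.im = tevent_create_immediate(mem_ctx),
	};
	pthread_t t;

	pthread_create(&t, NULL, example_thread, &ts);
	while (!example_done) {
		tevent_loop_once(ev);	/* wakeup fd fires, immediate runs */
	}
	pthread_join(t, NULL);

	talloc_free(mem_ctx);
	return 0;
}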
Return -1 if tv1 < tv2 Return 0 if tv1 == tv2 Return 1 if tv1 > tv2 */ int tevent_timeval_compare(const struct timeval *tv1, const struct timeval *tv2) { if (tv1->tv_sec > tv2->tv_sec) return 1; if (tv1->tv_sec < tv2->tv_sec) return -1; if (tv1->tv_usec > tv2->tv_usec) return 1; if (tv1->tv_usec < tv2->tv_usec) return -1; return 0; } /** return a zero timeval */ struct timeval tevent_timeval_zero(void) { struct timeval tv; tv.tv_sec = 0; tv.tv_usec = 0; return tv; } /** return a timeval for the current time */ struct timeval tevent_timeval_current(void) { struct timeval tv; gettimeofday(&tv, NULL); return tv; } /** return a timeval struct with the given elements */ struct timeval tevent_timeval_set(uint32_t secs, uint32_t usecs) { struct timeval tv; tv.tv_sec = secs; tv.tv_usec = usecs; return tv; } /** return the difference between two timevals as a timeval if tv1 comes after tv2, then return a zero timeval (this is *tv2 - *tv1) */ struct timeval tevent_timeval_until(const struct timeval *tv1, const struct timeval *tv2) { struct timeval t; if (tevent_timeval_compare(tv1, tv2) >= 0) { return tevent_timeval_zero(); } t.tv_sec = tv2->tv_sec - tv1->tv_sec; if (tv1->tv_usec > tv2->tv_usec) { t.tv_sec--; t.tv_usec = 1000000 - (tv1->tv_usec - tv2->tv_usec); } else { t.tv_usec = tv2->tv_usec - tv1->tv_usec; } return t; } /** return true if a timeval is zero */ bool tevent_timeval_is_zero(const struct timeval *tv) { return tv->tv_sec == 0 && tv->tv_usec == 0; } struct timeval tevent_timeval_add(const struct timeval *tv, uint32_t secs, uint32_t usecs) { struct timeval tv2 = *tv; tv2.tv_sec += secs; tv2.tv_usec += usecs; tv2.tv_sec += tv2.tv_usec / 1000000; tv2.tv_usec = tv2.tv_usec % 1000000; return tv2; } /** return a timeval in the future with a specified offset */ struct timeval tevent_timeval_current_ofs(uint32_t secs, uint32_t usecs) { struct timeval tv = tevent_timeval_current(); return tevent_timeval_add(&tv, secs, usecs); } /* destroy a timed event */ static int tevent_common_timed_destructor(struct tevent_timer *te) { if (te->destroyed) { tevent_common_check_double_free(te, "tevent_timer double free"); goto done; } te->destroyed = true; if (te->event_ctx == NULL) { return 0; } tevent_debug(te->event_ctx, TEVENT_DEBUG_TRACE, "Destroying timer event %p \"%s\"\n", te, te->handler_name); if (te->event_ctx->last_zero_timer == te) { te->event_ctx->last_zero_timer = DLIST_PREV(te); } tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH); DLIST_REMOVE(te->event_ctx->timer_events, te); te->event_ctx = NULL; done: if (te->busy) { return -1; } te->wrapper = NULL; return 0; } static void tevent_common_insert_timer(struct tevent_context *ev, struct tevent_timer *te, bool optimize_zero) { struct tevent_timer *prev_te = NULL; if (te->destroyed) { tevent_abort(ev, "tevent_timer use after free"); return; } /* keep the list ordered */ if (optimize_zero && tevent_timeval_is_zero(&te->next_event)) { /* * Some callers use zero tevent_timer * instead of tevent_immediate events. * * As these can happen very often, * we remember the last zero timer * in the list. 
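/*
 * Illustrative sketch, not part of the tevent sources: how the timeval
 * helpers above combine when computing deadlines.
 */
#include <stdio.h>
#include <tevent.h>

int main(void)
{
	struct timeval now = tevent_timeval_current();
	struct timeval deadline = tevent_timeval_add(&now, 2, 500000);
	struct timeval left = tevent_timeval_until(&now, &deadline);

	/* left is ~2.5s; tevent_timeval_until() clamps to zero once the
	 * deadline has passed */
	printf("seconds left: %ld.%06ld\n",
	       (long)left.tv_sec, (long)left.tv_usec);

	if (tevent_timeval_compare(&deadline, &now) > 0) {
		printf("deadline is in the future\n");
	}

	/* shortcut for "current time plus offset" */
	deadline = tevent_timeval_current_ofs(2, 500000);
	return 0;
}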
*/ prev_te = ev->last_zero_timer; ev->last_zero_timer = te; } else { struct tevent_timer *cur_te; /* * we traverse the list from the tail * because it's much more likely that * timers are added at the end of the list */ for (cur_te = DLIST_TAIL(ev->timer_events); cur_te != NULL; cur_te = DLIST_PREV(cur_te)) { int ret; /* * if the new event comes before the current * we continue searching */ ret = tevent_timeval_compare(&te->next_event, &cur_te->next_event); if (ret < 0) { continue; } break; } prev_te = cur_te; } tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_ATTACH); DLIST_ADD_AFTER(ev->timer_events, te, prev_te); } /* add a timed event return NULL on failure (memory allocation error) */ static struct tevent_timer *tevent_common_add_timer_internal( struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location, bool optimize_zero) { struct tevent_timer *te; te = talloc(mem_ctx?mem_ctx:ev, struct tevent_timer); if (te == NULL) return NULL; *te = (struct tevent_timer) { .event_ctx = ev, .next_event = next_event, .handler = handler, .private_data = private_data, .handler_name = handler_name, .location = location, }; if (ev->timer_events == NULL) { ev->last_zero_timer = NULL; } tevent_common_insert_timer(ev, te, optimize_zero); talloc_set_destructor(te, tevent_common_timed_destructor); tevent_debug(ev, TEVENT_DEBUG_TRACE, "Added timed event \"%s\": %p\n", handler_name, te); return te; } struct tevent_timer *tevent_common_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location) { /* * do not use optimization, there are broken Samba * versions which use tevent_common_add_timer() * without using tevent_common_loop_timer_delay(), * it just uses DLIST_REMOVE(ev->timer_events, te) * and would leave ev->last_zero_timer behind. */ return tevent_common_add_timer_internal(ev, mem_ctx, next_event, handler, private_data, handler_name, location, false); } struct tevent_timer *tevent_common_add_timer_v2(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location) { /* * Here we turn on last_zero_timer optimization */ return tevent_common_add_timer_internal(ev, mem_ctx, next_event, handler, private_data, handler_name, location, true); } void tevent_update_timer(struct tevent_timer *te, struct timeval next_event) { struct tevent_context *ev = te->event_ctx; if (ev->last_zero_timer == te) { te->event_ctx->last_zero_timer = DLIST_PREV(te); } tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH); DLIST_REMOVE(ev->timer_events, te); te->next_event = next_event; /* * Not doing the zero_timer optimization. This is for new code * that should know about immediates. 
*/ tevent_common_insert_timer(ev, te, false); } int tevent_common_invoke_timer_handler(struct tevent_timer *te, struct timeval current_time, bool *removed) { struct tevent_context *handler_ev = te->event_ctx; if (removed != NULL) { *removed = false; } if (te->event_ctx == NULL) { return 0; } /* * We need to remove the timer from the list before calling the * handler because in a semi-async inner event loop called from the * handler we don't want to come across this event again -- vl */ if (te->event_ctx->last_zero_timer == te) { te->event_ctx->last_zero_timer = DLIST_PREV(te); } DLIST_REMOVE(te->event_ctx->timer_events, te); tevent_debug(te->event_ctx, TEVENT_DEBUG_TRACE, "Running timer event %p \"%s\"\n", te, te->handler_name); /* * If the timed event was registered for a zero current_time, * then we pass a zero timeval here too! To avoid the * overhead of gettimeofday() calls. * * otherwise we pass the current time */ te->busy = true; if (te->wrapper != NULL) { handler_ev = te->wrapper->wrap_ev; tevent_wrapper_push_use_internal(handler_ev, te->wrapper); te->wrapper->ops->before_timer_handler( te->wrapper->wrap_ev, te->wrapper->private_state, te->wrapper->main_ev, te, te->next_event, current_time, te->handler_name, te->location); } tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_BEFORE_HANDLER); te->handler(handler_ev, te, current_time, te->private_data); if (te->wrapper != NULL) { te->wrapper->ops->after_timer_handler( te->wrapper->wrap_ev, te->wrapper->private_state, te->wrapper->main_ev, te, te->next_event, current_time, te->handler_name, te->location); tevent_wrapper_pop_use_internal(handler_ev, te->wrapper); } te->busy = false; tevent_debug(te->event_ctx, TEVENT_DEBUG_TRACE, "Ending timer event %p \"%s\"\n", te, te->handler_name); /* The callback was already called when freed from the handler. */ if (!te->destroyed) { tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH); } te->wrapper = NULL; te->event_ctx = NULL; talloc_set_destructor(te, NULL); TALLOC_FREE(te); if (removed != NULL) { *removed = true; } return 0; } /* do a single event loop using the events defined in ev return the delay until the next timed event, or zero if a timed event was triggered */ struct timeval tevent_common_loop_timer_delay(struct tevent_context *ev) { struct timeval current_time = tevent_timeval_zero(); struct tevent_timer *te = ev->timer_events; int ret; if (!te) { /* have a default tick time of 30 seconds. This guarantees that code that uses its own timeout checking will be able to proceed eventually */ return tevent_timeval_set(30, 0); } /* * work out the right timeout for the next timed event * * avoid the syscall to gettimeofday() if the timed event should * be triggered directly * * if there's a delay till the next timed event, we're done * with just returning the delay */ if (!tevent_timeval_is_zero(&te->next_event)) { struct timeval delay; current_time = tevent_timeval_current(); delay = tevent_timeval_until(¤t_time, &te->next_event); if (!tevent_timeval_is_zero(&delay)) { return delay; } } /* * ok, we have a timed event that we'll process ... 
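/*
 * Illustrative sketch, not part of the tevent sources: registering a timer
 * through the public tevent_add_timer() wrapper over the code above.
 * example_timer_handler is a made-up name.
 */
#include <talloc.h>
#include <tevent.h>

static void example_timer_handler(struct tevent_context *ev,
				  struct tevent_timer *te,
				  struct timeval current_time,
				  void *private_data)
{
	int *fired = private_data;

	*fired = 1;
	/* te is freed by tevent once this handler returns */
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_timer *te;
	int fired = 0;

	/* fire roughly one second from now */
	te = tevent_add_timer(ev, mem_ctx,
			      tevent_timeval_current_ofs(1, 0),
			      example_timer_handler, &fired);
	if (te == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	while (!fired) {
		tevent_loop_once(ev);
	}

	talloc_free(mem_ctx);
	return 0;
}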
*/ ret = tevent_common_invoke_timer_handler(te, current_time, NULL); if (ret != 0) { tevent_abort(ev, "tevent_common_invoke_timer_handler() failed"); } return tevent_timeval_zero(); } void tevent_timer_set_tag(struct tevent_timer *te, uint64_t tag) { if (te == NULL) { return; } te->tag = tag; } uint64_t tevent_timer_get_tag(const struct tevent_timer *te) { if (te == NULL) { return 0; } return te->tag; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_util.c0000660000000000000000000000350600000000000015545 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Copyright (C) Andrew Tridgell 2005 Copyright (C) Jelmer Vernooij 2005 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "talloc.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" #include /** Set a fd into blocking/nonblocking mode. Uses POSIX O_NONBLOCK if available, else if SYSV use O_NDELAY if BSD use FNDELAY **/ int ev_set_blocking(int fd, bool set) { int val; #ifdef O_NONBLOCK #define FLAG_TO_SET O_NONBLOCK #else #ifdef SYSV #define FLAG_TO_SET O_NDELAY #else /* BSD */ #define FLAG_TO_SET FNDELAY #endif #endif if((val = fcntl(fd, F_GETFL, 0)) == -1) return -1; if(set) /* Turn blocking on - ie. clear nonblock flag */ val &= ~FLAG_TO_SET; else val |= FLAG_TO_SET; return fcntl( fd, F_SETFL, val); #undef FLAG_TO_SET } bool ev_set_close_on_exec(int fd) { #ifdef FD_CLOEXEC int val; val = fcntl(fd, F_GETFD, 0); if (val >= 0) { val |= FD_CLOEXEC; val = fcntl(fd, F_SETFD, val); if (val != -1) { return true; } } #endif return false; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/tevent_util.h0000660000000000000000000001121600000000000015547 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Copyright (C) Andrew Tridgell 1998-2010 Copyright (C) Jelmer Vernooij 2005 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* To use these macros you must have a structure containing a next and prev pointer */ #ifndef _DLINKLIST_H #define _DLINKLIST_H /* February 2010 - changed list format to have a prev pointer from the list head. This makes DLIST_ADD_END() O(1) even though we only have one list pointer. 
The scheme is as follows: 1) with no entries in the list: list_head == NULL 2) with 1 entry in the list: list_head->next == NULL list_head->prev == list_head 3) with 2 entries in the list: list_head->next == element2 list_head->prev == element2 element2->prev == list_head element2->next == NULL 4) with N entries in the list: list_head->next == element2 list_head->prev == elementN elementN->prev == element{N-1} elementN->next == NULL This allows us to find the tail of the list by using list_head->prev, which means we can add to the end of the list in O(1) time */ /* add an element at the front of a list */ #define DLIST_ADD(list, p) \ do { \ if (!(list)) { \ (p)->prev = (list) = (p); \ (p)->next = NULL; \ } else { \ (p)->prev = (list)->prev; \ (list)->prev = (p); \ (p)->next = (list); \ (list) = (p); \ } \ } while (0) /* remove an element from a list Note that the element doesn't have to be in the list. If it isn't then this is a no-op */ #define DLIST_REMOVE(list, p) \ do { \ if ((p) == (list)) { \ if ((p)->next) (p)->next->prev = (p)->prev; \ (list) = (p)->next; \ } else if ((p)->prev && (list) && (p) == (list)->prev) { \ (p)->prev->next = NULL; \ (list)->prev = (p)->prev; \ } else { \ if ((p)->prev) (p)->prev->next = (p)->next; \ if ((p)->next) (p)->next->prev = (p)->prev; \ } \ if ((p) != (list)) (p)->next = (p)->prev = NULL; \ } while (0) /* find the head of the list given any element in it. Note that this costs O(N), so you should avoid this macro if at all possible! */ #define DLIST_HEAD(p, result_head) \ do { \ (result_head) = (p); \ while (DLIST_PREV(result_head)) (result_head) = (result_head)->prev; \ } while(0) /* return the last element in the list */ #define DLIST_TAIL(list) ((list)?(list)->prev:NULL) /* return the previous element in the list. */ #define DLIST_PREV(p) (((p)->prev && (p)->prev->next != NULL)?(p)->prev:NULL) /* insert 'p' after the given element 'el' in a list. If el is NULL then this is the same as a DLIST_ADD() */ #define DLIST_ADD_AFTER(list, p, el) \ do { \ if (!(list) || !(el)) { \ DLIST_ADD(list, p); \ } else { \ (p)->prev = (el); \ (p)->next = (el)->next; \ (el)->next = (p); \ if ((p)->next) (p)->next->prev = (p); \ if ((list)->prev == (el)) (list)->prev = (p); \ }\ } while (0) /* add to the end of a list. */ #define DLIST_ADD_END(list, p) \ do { \ if (!(list)) { \ DLIST_ADD(list, p); \ } else { \ DLIST_ADD_AFTER(list, p, (list)->prev); \ } \ } while (0) /* promote an element to the front of a list */ #define DLIST_PROMOTE(list, p) \ do { \ DLIST_REMOVE(list, p); \ DLIST_ADD(list, p); \ } while (0) /* demote an element to the end of a list. */ #define DLIST_DEMOTE(list, p) \ do { \ DLIST_REMOVE(list, p); \ DLIST_ADD_END(list, p); \ } while (0) /* concatenate two lists - putting all elements of the 2nd list at the end of the first list. */ #define DLIST_CONCATENATE(list1, list2) \ do { \ if (!(list1)) { \ (list1) = (list2); \ } else { \ (list1)->prev->next = (list2); \ if (list2) { \ void *_tmplist = (void *)(list1)->prev; \ (list1)->prev = (list2)->prev; \ (list2)->prev = _tmplist; \ } \ } \ } while (0) #endif /* _DLINKLIST_H */ int ev_set_blocking(int fd, bool set); bool ev_set_close_on_exec(int fd); /* Defined here so we can build against older talloc versions that don't * have this define yet. 
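/*
 * Illustrative sketch, not part of the tevent sources: the list scheme
 * described above in action -- the head's prev pointer always names the
 * tail, which is what makes DLIST_ADD_END() O(1).  Assumes the DLIST_*
 * macros above are in scope; "struct item" is a made-up type.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *prev, *next;	/* required by the DLIST_* macros */
	int value;
};

int main(void)
{
	struct item *list = NULL;
	struct item *it;
	int i;

	for (i = 0; i < 3; i++) {
		struct item *n = calloc(1, sizeof(*n));
		n->value = i;
		DLIST_ADD_END(list, n);	/* tail found via list->prev */
	}

	/* head->prev is the tail, the tail's next is NULL */
	printf("head=%d tail=%d\n", list->value, DLIST_TAIL(list)->value);

	for (it = list; it != NULL; it = it->next) {
		printf("%d\n", it->value);
	}

	while (list != NULL) {
		struct item *n = list;

		DLIST_REMOVE(list, n);
		free(n);
	}
	return 0;
}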
*/ #ifndef TALLOC_FREE #define TALLOC_FREE(ctx) do { talloc_free(ctx); ctx=NULL; } while(0) #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_wakeup.c0000660000000000000000000000341600000000000016064 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Infrastructure for async requests Copyright (C) Volker Lendecke 2008 Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" struct tevent_wakeup_state { struct timeval wakeup_time; }; struct tevent_req *tevent_wakeup_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct timeval wakeup_time) { struct tevent_req *req; struct tevent_wakeup_state *state; req = tevent_req_create(mem_ctx, &state, struct tevent_wakeup_state); if (!req) { return NULL; } state->wakeup_time = wakeup_time; if (!tevent_req_set_endtime(req, ev, wakeup_time)) { return tevent_req_post(req, ev); } return req; } bool tevent_wakeup_recv(struct tevent_req *req) { enum tevent_req_state state; uint64_t error; if (tevent_req_is_error(req, &state, &error)) { if (state == TEVENT_REQ_TIMED_OUT) { return true; } } return false; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0252056 tevent-0.11.0/tevent_wrapper.c0000660000000000000000000003176500000000000016260 0ustar00rootroot00000000000000/* Infrastructure for event context wrappers Copyright (C) Stefan Metzmacher 2014 ** NOTE! The following LGPL license applies to the tevent ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
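/*
 * Illustrative sketch, not part of the tevent sources: using the
 * tevent_wakeup_send()/tevent_wakeup_recv() pair above as a composable
 * "sleep until this time" request.
 */
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_req *req;

	/* completes (internally as TEVENT_REQ_TIMED_OUT) half a second from now */
	req = tevent_wakeup_send(mem_ctx, ev,
				 tevent_timeval_current_ofs(0, 500000));
	if (req == NULL || !tevent_req_poll(req, ev)) {
		talloc_free(mem_ctx);
		return 1;
	}

	printf("woke up: %s\n", tevent_wakeup_recv(req) ? "ok" : "error");
	talloc_free(mem_ctx);
	return 0;
}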
*/ #include "replace.h" #ifdef HAVE_PTHREAD #include "system/threads.h" #endif #define TEVENT_DEPRECATED 1 #include "tevent.h" #include "tevent_internal.h" #include "tevent_util.h" static int tevent_wrapper_glue_context_init(struct tevent_context *ev) { tevent_abort(ev, "tevent_wrapper_glue_context_init() called"); errno = ENOSYS; return -1; } static struct tevent_fd *tevent_wrapper_glue_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int fd, uint16_t flags, tevent_fd_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct tevent_wrapper_glue *glue = ev->wrapper.glue; struct tevent_fd *fde = NULL; if (glue->destroyed) { tevent_abort(ev, "add_fd wrapper use after free"); return NULL; } if (glue->main_ev == NULL) { errno = EINVAL; return NULL; } fde = _tevent_add_fd(glue->main_ev, mem_ctx, fd, flags, handler, private_data, handler_name, location); if (fde == NULL) { return NULL; } fde->wrapper = glue; return fde; } static struct tevent_timer *tevent_wrapper_glue_add_timer(struct tevent_context *ev, TALLOC_CTX *mem_ctx, struct timeval next_event, tevent_timer_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct tevent_wrapper_glue *glue = ev->wrapper.glue; struct tevent_timer *te = NULL; if (glue->destroyed) { tevent_abort(ev, "add_timer wrapper use after free"); return NULL; } if (glue->main_ev == NULL) { errno = EINVAL; return NULL; } te = _tevent_add_timer(glue->main_ev, mem_ctx, next_event, handler, private_data, handler_name, location); if (te == NULL) { return NULL; } te->wrapper = glue; return te; } static void tevent_wrapper_glue_schedule_immediate(struct tevent_immediate *im, struct tevent_context *ev, tevent_immediate_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct tevent_wrapper_glue *glue = ev->wrapper.glue; if (glue->destroyed) { tevent_abort(ev, "scheduke_immediate wrapper use after free"); return; } if (glue->main_ev == NULL) { tevent_abort(ev, location); errno = EINVAL; return; } _tevent_schedule_immediate(im, glue->main_ev, handler, private_data, handler_name, location); im->wrapper = glue; return; } static struct tevent_signal *tevent_wrapper_glue_add_signal(struct tevent_context *ev, TALLOC_CTX *mem_ctx, int signum, int sa_flags, tevent_signal_handler_t handler, void *private_data, const char *handler_name, const char *location) { struct tevent_wrapper_glue *glue = ev->wrapper.glue; struct tevent_signal *se = NULL; if (glue->destroyed) { tevent_abort(ev, "add_signal wrapper use after free"); return NULL; } if (glue->main_ev == NULL) { errno = EINVAL; return NULL; } se = _tevent_add_signal(glue->main_ev, mem_ctx, signum, sa_flags, handler, private_data, handler_name, location); if (se == NULL) { return NULL; } se->wrapper = glue; return se; } static int tevent_wrapper_glue_loop_once(struct tevent_context *ev, const char *location) { tevent_abort(ev, "tevent_wrapper_glue_loop_once() called"); errno = ENOSYS; return -1; } static int tevent_wrapper_glue_loop_wait(struct tevent_context *ev, const char *location) { tevent_abort(ev, "tevent_wrapper_glue_loop_wait() called"); errno = ENOSYS; return -1; } static const struct tevent_ops tevent_wrapper_glue_ops = { .context_init = tevent_wrapper_glue_context_init, .add_fd = tevent_wrapper_glue_add_fd, .set_fd_close_fn = tevent_common_fd_set_close_fn, .get_fd_flags = tevent_common_fd_get_flags, .set_fd_flags = tevent_common_fd_set_flags, .add_timer = tevent_wrapper_glue_add_timer, .schedule_immediate 
= tevent_wrapper_glue_schedule_immediate, .add_signal = tevent_wrapper_glue_add_signal, .loop_once = tevent_wrapper_glue_loop_once, .loop_wait = tevent_wrapper_glue_loop_wait, }; static int tevent_wrapper_context_destructor(struct tevent_context *wrap_ev) { struct tevent_wrapper_glue *glue = wrap_ev->wrapper.glue; struct tevent_context *main_ev = NULL; struct tevent_fd *fd = NULL, *fn = NULL; struct tevent_timer *te = NULL, *tn = NULL; struct tevent_immediate *ie = NULL, *in = NULL; struct tevent_signal *se = NULL, *sn = NULL; #ifdef HAVE_PTHREAD struct tevent_threaded_context *tctx = NULL, *tctxn = NULL; #endif if (glue == NULL) { tevent_abort(wrap_ev, "tevent_wrapper_context_destructor() active on main"); /* static checker support, return below is never reached */ return -1; } if (glue->destroyed && glue->busy) { tevent_common_check_double_free(wrap_ev, "tevent_context wrapper double free"); } glue->destroyed = true; if (glue->busy) { return -1; } main_ev = glue->main_ev; if (main_ev == NULL) { return 0; } tevent_debug(wrap_ev, TEVENT_DEBUG_TRACE, "Destroying wrapper context %p \"%s\"\n", wrap_ev, talloc_get_name(glue->private_state)); glue->main_ev = NULL; DLIST_REMOVE(main_ev->wrapper.list, glue); #ifdef HAVE_PTHREAD for (tctx = main_ev->threaded_contexts; tctx != NULL; tctx = tctxn) { int ret; tctxn = tctx->next; if (tctx->event_ctx != glue->wrap_ev) { continue; } ret = pthread_mutex_lock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } /* * Indicate to the thread that the tevent_context is * gone. The counterpart of this is in * _tevent_threaded_schedule_immediate, there we read * this under the threaded_context's mutex. */ tctx->event_ctx = NULL; ret = pthread_mutex_unlock(&tctx->event_ctx_mutex); if (ret != 0) { abort(); } DLIST_REMOVE(main_ev->threaded_contexts, tctx); } #endif for (fd = main_ev->fd_events; fd; fd = fn) { fn = fd->next; if (fd->wrapper != glue) { continue; } tevent_fd_set_flags(fd, 0); fd->wrapper = NULL; fd->event_ctx = NULL; DLIST_REMOVE(main_ev->fd_events, fd); } for (te = main_ev->timer_events; te; te = tn) { tn = te->next; if (te->wrapper != glue) { continue; } te->wrapper = NULL; te->event_ctx = NULL; if (main_ev->last_zero_timer == te) { main_ev->last_zero_timer = DLIST_PREV(te); } DLIST_REMOVE(main_ev->timer_events, te); } for (ie = main_ev->immediate_events; ie; ie = in) { in = ie->next; if (ie->wrapper != glue) { continue; } ie->wrapper = NULL; ie->event_ctx = NULL; ie->cancel_fn = NULL; DLIST_REMOVE(main_ev->immediate_events, ie); } for (se = main_ev->signal_events; se; se = sn) { sn = se->next; if (se->wrapper != glue) { continue; } se->wrapper = NULL; tevent_cleanup_pending_signal_handlers(se); } return 0; } struct tevent_context *_tevent_context_wrapper_create(struct tevent_context *main_ev, TALLOC_CTX *mem_ctx, const struct tevent_wrapper_ops *ops, void *pstate, size_t psize, const char *type, const char *location) { void **ppstate = (void **)pstate; struct tevent_context *ev = NULL; if (main_ev->wrapper.glue != NULL) { /* * stacking of wrappers is not supported */ tevent_debug(main_ev->wrapper.glue->main_ev, TEVENT_DEBUG_FATAL, "%s: %s() stacking not allowed\n", __func__, location); errno = EINVAL; return NULL; } if (main_ev->nesting.allowed) { /* * wrappers conflict with nesting */ tevent_debug(main_ev, TEVENT_DEBUG_FATAL, "%s: %s() conflicts with nesting\n", __func__, location); errno = EINVAL; return NULL; } ev = talloc_zero(mem_ctx, struct tevent_context); if (ev == NULL) { return NULL; } ev->ops = &tevent_wrapper_glue_ops; ev->wrapper.glue 
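/*
 * Ownership layout: the glue object is allocated as a talloc child of
 * the wrapper context, and the caller-visible private state is a
 * talloc child of the glue, named after the "type" string.  Callers
 * normally reach this function through the tevent_context_wrapper_create()
 * macro in tevent.h, which supplies the size, type name and location
 * arguments; pstate is really a pointer to the caller's state pointer.
 * Roughly (illustrative names only):
 *
 *   struct my_wrapper_state *state = NULL;
 *   struct tevent_context *wrap_ev =
 *       tevent_context_wrapper_create(main_ev, mem_ctx, &my_wrapper_ops,
 *                                     &state, struct my_wrapper_state);
 *
 * where my_wrapper_state and my_wrapper_ops are caller-defined.
 */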
= talloc_zero(ev, struct tevent_wrapper_glue); if (ev->wrapper.glue == NULL) { talloc_free(ev); return NULL; } talloc_set_destructor(ev, tevent_wrapper_context_destructor); ev->wrapper.glue->wrap_ev = ev; ev->wrapper.glue->main_ev = main_ev; ev->wrapper.glue->ops = ops; ev->wrapper.glue->private_state = talloc_zero_size(ev->wrapper.glue, psize); if (ev->wrapper.glue->private_state == NULL) { talloc_free(ev); return NULL; } talloc_set_name_const(ev->wrapper.glue->private_state, type); DLIST_ADD_END(main_ev->wrapper.list, ev->wrapper.glue); *ppstate = ev->wrapper.glue->private_state; return ev; } bool tevent_context_is_wrapper(struct tevent_context *ev) { if (ev->wrapper.glue != NULL) { return true; } return false; } _PRIVATE_ struct tevent_context *tevent_wrapper_main_ev(struct tevent_context *ev) { if (ev == NULL) { return NULL; } if (ev->wrapper.glue == NULL) { return ev; } return ev->wrapper.glue->main_ev; } /* * 32 stack elements should be more than enough * * e.g. Samba uses just 8 elements for [un]become_{root,user}() */ #define TEVENT_WRAPPER_STACK_SIZE 32 static struct tevent_wrapper_stack { const void *ev_ptr; const struct tevent_wrapper_glue *wrapper; } wrapper_stack[TEVENT_WRAPPER_STACK_SIZE]; static size_t wrapper_stack_idx; _PRIVATE_ void tevent_wrapper_push_use_internal(struct tevent_context *ev, struct tevent_wrapper_glue *wrapper) { /* * ev and wrapper need to belong together! * It's also fine to only have a raw ev * without a wrapper. */ if (unlikely(ev->wrapper.glue != wrapper)) { tevent_abort(ev, "tevent_wrapper_push_use_internal() invalid arguments"); return; } if (wrapper != NULL) { if (unlikely(wrapper->busy)) { tevent_abort(ev, "wrapper already busy!"); return; } wrapper->busy = true; } if (unlikely(wrapper_stack_idx >= TEVENT_WRAPPER_STACK_SIZE)) { tevent_abort(ev, "TEVENT_WRAPPER_STACK_SIZE overflow"); return; } wrapper_stack[wrapper_stack_idx] = (struct tevent_wrapper_stack) { .ev_ptr = ev, .wrapper = wrapper, }; wrapper_stack_idx++; } _PRIVATE_ void tevent_wrapper_pop_use_internal(const struct tevent_context *__ev_ptr, struct tevent_wrapper_glue *wrapper) { struct tevent_context *main_ev = NULL; /* * Note that __ev_ptr might a a stale pointer and should not * be touched, we just compare the pointer value in order * to enforce the stack order. */ if (wrapper != NULL) { main_ev = wrapper->main_ev; } if (unlikely(wrapper_stack_idx == 0)) { tevent_abort(main_ev, "tevent_wrapper stack already empty"); return; } wrapper_stack_idx--; if (wrapper != NULL) { wrapper->busy = false; } if (wrapper_stack[wrapper_stack_idx].ev_ptr != __ev_ptr) { tevent_abort(main_ev, "tevent_wrapper_pop_use mismatch ev!"); return; } if (wrapper_stack[wrapper_stack_idx].wrapper != wrapper) { tevent_abort(main_ev, "tevent_wrapper_pop_use mismatch wrap!"); return; } if (wrapper == NULL) { return; } if (wrapper->destroyed) { /* * Notice that we can't use TALLOC_FREE() * here because wrapper is a talloc child * of wrapper->wrap_ev. 
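 * Freeing wrapper->wrap_ev instead triggers
 * tevent_wrapper_context_destructor(), which detaches any events
 * still tagged with this glue from the main context before the
 * memory goes away.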
*/ talloc_free(wrapper->wrap_ev); } } bool _tevent_context_push_use(struct tevent_context *ev, const char *location) { bool ok; if (ev->wrapper.glue == NULL) { tevent_wrapper_push_use_internal(ev, NULL); return true; } if (ev->wrapper.glue->main_ev == NULL) { return false; } tevent_wrapper_push_use_internal(ev, ev->wrapper.glue); ok = ev->wrapper.glue->ops->before_use(ev->wrapper.glue->wrap_ev, ev->wrapper.glue->private_state, ev->wrapper.glue->main_ev, location); if (!ok) { tevent_wrapper_pop_use_internal(ev, ev->wrapper.glue); return false; } return true; } void _tevent_context_pop_use(struct tevent_context *ev, const char *location) { tevent_wrapper_pop_use_internal(ev, ev->wrapper.glue); if (ev->wrapper.glue == NULL) { return; } if (ev->wrapper.glue->main_ev == NULL) { return; } ev->wrapper.glue->ops->after_use(ev->wrapper.glue->wrap_ev, ev->wrapper.glue->private_state, ev->wrapper.glue->main_ev, location); } bool tevent_context_same_loop(struct tevent_context *ev1, struct tevent_context *ev2) { struct tevent_context *main_ev1 = tevent_wrapper_main_ev(ev1); struct tevent_context *main_ev2 = tevent_wrapper_main_ev(ev2); if (main_ev1 == NULL) { return false; } if (main_ev1 == main_ev2) { return true; } return false; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/wscript0000660000000000000000000001350100000000000014451 0ustar00rootroot00000000000000#!/usr/bin/env python APPNAME = 'tevent' VERSION = '0.11.0' import sys, os # find the buildtools directory top = '.' while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: top = top + '/..' sys.path.insert(0, top + '/buildtools/wafsamba') out = 'bin' import wafsamba from wafsamba import samba_dist, samba_utils from waflib import Options, Logs, Context, Errors samba_dist.DIST_DIRS('''lib/tevent:. 
lib/replace:lib/replace lib/talloc:lib/talloc buildtools:buildtools third_party/cmocka:third_party/cmocka third_party/waf:third_party/waf''') def options(opt): opt.BUILTIN_DEFAULT('replace') opt.PRIVATE_EXTENSION_DEFAULT('tevent', noextension='tevent') opt.RECURSE('lib/replace') opt.RECURSE('lib/talloc') def configure(conf): conf.RECURSE('lib/replace') conf.RECURSE('lib/talloc') if conf.CHECK_FOR_THIRD_PARTY(): conf.RECURSE('third_party/cmocka') else: if not conf.CHECK_CMOCKA(): raise Errors.WafError('cmocka development package have not been found.\nIf third_party is installed, check that it is in the proper place.') else: conf.define('USING_SYSTEM_CMOCKA', 1) conf.env.standalone_tevent = conf.IN_LAUNCH_DIR() if not conf.env.standalone_tevent: if conf.CHECK_BUNDLED_SYSTEM_PKG('tevent', minversion=VERSION, onlyif='talloc', implied_deps='replace talloc'): conf.define('USING_SYSTEM_TEVENT', 1) if not conf.env.disable_python and \ conf.CHECK_BUNDLED_SYSTEM_PYTHON('pytevent', 'tevent', minversion=VERSION): conf.define('USING_SYSTEM_PYTEVENT', 1) if conf.CHECK_FUNCS('epoll_create', headers='sys/epoll.h'): conf.DEFINE('HAVE_EPOLL', 1) tevent_num_signals = 64 v = conf.CHECK_VALUEOF('NSIG', headers='signal.h') if v is not None: tevent_num_signals = max(tevent_num_signals, v) v = conf.CHECK_VALUEOF('_NSIG', headers='signal.h') if v is not None: tevent_num_signals = max(tevent_num_signals, v) v = conf.CHECK_VALUEOF('SIGRTMAX', headers='signal.h') if v is not None: tevent_num_signals = max(tevent_num_signals, v) v = conf.CHECK_VALUEOF('SIGRTMIN', headers='signal.h') if v is not None: tevent_num_signals = max(tevent_num_signals, v*2) if not conf.CONFIG_SET('USING_SYSTEM_TEVENT'): conf.DEFINE('TEVENT_NUM_SIGNALS', tevent_num_signals) conf.SAMBA_CHECK_PYTHON() conf.SAMBA_CHECK_PYTHON_HEADERS() conf.SAMBA_CONFIG_H() conf.SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS() def build(bld): bld.RECURSE('lib/replace') bld.RECURSE('lib/talloc') if bld.CHECK_FOR_THIRD_PARTY(): bld.RECURSE('third_party/cmocka') SRC = '''tevent.c tevent_debug.c tevent_fd.c tevent_immediate.c tevent_queue.c tevent_req.c tevent_wrapper.c tevent_poll.c tevent_threads.c tevent_signal.c tevent_standard.c tevent_timed.c tevent_util.c tevent_wakeup.c''' if bld.CONFIG_SET('HAVE_EPOLL'): SRC += ' tevent_epoll.c' if bld.CONFIG_SET('HAVE_SOLARIS_PORTS'): SRC += ' tevent_port.c' if bld.env.standalone_tevent: bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig' private_library = False else: private_library = True if not bld.CONFIG_SET('USING_SYSTEM_TEVENT'): tevent_deps = 'replace talloc' if bld.CONFIG_SET('HAVE_PTHREAD'): tevent_deps += ' pthread' bld.SAMBA_LIBRARY('tevent', SRC, deps=tevent_deps, enabled= not bld.CONFIG_SET('USING_SYSTEM_TEVENT'), includes='.', abi_directory='ABI', abi_match='tevent_* _tevent_*', vnum=VERSION, public_headers=('' if private_library else 'tevent.h'), public_headers_install=not private_library, pc_files='tevent.pc', private_library=private_library) if not bld.CONFIG_SET('USING_SYSTEM_PYTEVENT') and not bld.env.disable_python: bld.SAMBA_PYTHON('_tevent', 'pytevent.c', deps='tevent', realname='_tevent.so', cflags='-DPACKAGE_VERSION=\"%s\"' % VERSION) bld.INSTALL_WILDCARD('${PYTHONARCHDIR}', 'tevent.py', flat=False) # install out various python scripts for use by make test bld.SAMBA_SCRIPT('tevent_python', pattern='tevent.py', installdir='python') bld.SAMBA_BINARY('test_tevent_tag', source='tests/test_tevent_tag.c', deps='cmocka tevent', install=False) bld.SAMBA_BINARY('test_tevent_trace', source='tests/test_tevent_trace.c', 
deps='cmocka tevent', install=False) def test(ctx): '''test tevent''' print("The tevent testsuite is part of smbtorture in samba4") samba_utils.ADD_LD_LIBRARY_PATH('bin/shared') samba_utils.ADD_LD_LIBRARY_PATH('bin/shared/private') pyret = samba_utils.RUN_PYTHON_TESTS(['bindings.py']) unit_test_ret = 0 unit_tests = [ 'test_tevent_tag', 'test_tevent_trace', ] for unit_test in unit_tests: unit_test_cmd = os.path.join(Context.g_module.out, unit_test) unit_test_ret = unit_test_ret or samba_utils.RUN_COMMAND(unit_test_cmd) sys.exit(pyret or unit_test_ret) def dist(): '''makes a tarball for distribution''' samba_dist.dist() def reconfigure(ctx): '''reconfigure if config scripts have changed''' samba_utils.reconfigure(ctx) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/.checker_innocent0000660000000000000000000000024600000000000020520 0ustar00rootroot00000000000000>>>MISTAKE21_create_files_6a9e68ada99a97cb >>>MISTAKE21_os2_delete_9b2bfa7f38711d09 >>>MISTAKE21_os2_delete_2fcc29aaa99a97cb >>>SECURITY2_os2_delete_9b2bfa7f1c9396ca ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/Makefile0000660000000000000000000000153400000000000016657 0ustar00rootroot00000000000000# simple makefile wrapper to run waf WAF_BINARY=$(PYTHON) ../../buildtools/bin/waf WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: touch .tmplock WAFLOCK=.tmplock $(WAF) dist distcheck: touch .tmplock WAFLOCK=.tmplock $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags bin/%:: FORCE $(WAF) --targets=`basename $@` FORCE: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/README0000660000000000000000000000305100000000000016073 0ustar00rootroot00000000000000This subsystem ensures that we can always use a certain core set of functions and types, that are either provided by the OS or by replacement functions / definitions in this subsystem. The aim is to try to stick to POSIX functions in here as much as possible. Convenience functions that are available on no platform at all belong in other subsystems (such as LIBUTIL). The following functions are guaranteed: ftruncate strlcpy strlcat mktime rename initgroups memmove strdup setlinebuf vsyslog timegm setenv unsetenv strndup strnlen waitpid seteuid setegid asprintf snprintf vasprintf vsnprintf opendir readdir telldir seekdir clock_gettime closedir dlopen dlclose dlsym dlerror chroot bzero strerror errno mkdtemp mkstemp (a secure one!) 
pread pwrite chown lchown readline (the library) inet_ntoa inet_ntop inet_pton inet_aton strtoll strtoull socketpair strptime getaddrinfo freeaddrinfo getnameinfo gai_strerror getifaddrs freeifaddrs utime utimes dup2 link readlink symlink realpath poll setproctitle memset_s Types: bool socklen_t uint{8,16,32,64}_t int{8,16,32,64}_t intptr_t sig_atomic_t blksize_t blkcnt_t Constants: PATH_NAME_MAX UINT{16,32,64}_MAX INT32_MAX RTLD_LAZY HOST_NAME_MAX UINT16_MAX UINT32_MAX UINT64_MAX CHAR_BIT Macros: va_copy __FUNCTION__ __FILE__ __LINE__ __LINESTR__ __location__ __STRING __STRINGSTRING MIN MAX QSORT_CAST ZERO_STRUCT ZERO_STRUCTP ZERO_STRUCTPN ZERO_ARRAY ARRAY_SIZE PTR_DIFF Headers: stdint.h stdbool.h Optional C keywords: volatile Prerequisites: memset (for bzero) syslog (for vsyslog) mktemp (for mkstemp and mkdtemp) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/replace/closefrom.c0000660000000000000000000000516200000000000017355 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * Samba utility functions * Copyright (C) Volker Lendecke 2016 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include #include #include static int closefrom_sysconf(int lower) { long max_files, fd; max_files = sysconf(_SC_OPEN_MAX); if (max_files == -1) { max_files = 65536; } for (fd=lower; fdd_name, &endptr, 10); if ((fd == 0) && (errno == EINVAL)) { continue; } if ((fd == ULLONG_MAX) && (errno == ERANGE)) { continue; } if (*endptr != '\0') { continue; } if (fd == dir_fd) { continue; } if (fd > INT_MAX) { continue; } if (fd < lower) { continue; } if (num_fds >= (fd_array_size / sizeof(int))) { void *tmp; if (fd_array_size == 0) { fd_array_size = 16 * sizeof(int); } else { if (fd_array_size + fd_array_size < fd_array_size) { /* overflow */ goto fail; } fd_array_size = fd_array_size + fd_array_size; } tmp = realloc(fds, fd_array_size); if (tmp == NULL) { goto fail; } fds = tmp; } fds[num_fds++] = fd; } for (i=0; i * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . 
*/ #include "replace.h" bool nss_wrapper_enabled(void) { return false; } bool nss_wrapper_hosts_enabled(void) { return false; } bool socket_wrapper_enabled(void) { return false; } bool uid_wrapper_enabled(void) { return false; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/dlfcn.c0000660000000000000000000000354300000000000016453 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Samba system utilities Copyright (C) Andrew Tridgell 1992-1998 Copyright (C) Jeremy Allison 1998-2002 Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #ifdef HAVE_DL_H #include #endif #ifndef HAVE_DLOPEN #ifdef DLOPEN_TAKES_UNSIGNED_FLAGS void *rep_dlopen(const char *name, unsigned int flags) #else void *rep_dlopen(const char *name, int flags) #endif { #ifdef HAVE_SHL_LOAD if (name == NULL) return PROG_HANDLE; return (void *)shl_load(name, flags, 0); #else return NULL; #endif } #endif #ifndef HAVE_DLSYM void *rep_dlsym(void *handle, const char *symbol) { #ifdef HAVE_SHL_FINDSYM void *sym_addr; if (!shl_findsym((shl_t *)&handle, symbol, TYPE_UNDEFINED, &sym_addr)) return sym_addr; #endif return NULL; } #endif #ifndef HAVE_DLERROR char *rep_dlerror(void) { return "dynamic loading of objects not supported on this platform"; } #endif #ifndef HAVE_DLCLOSE int rep_dlclose(void *handle) { #ifdef HAVE_SHL_CLOSE return shl_unload((shl_t)handle); #else return 0; #endif } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/getaddrinfo.c0000660000000000000000000002454400000000000017657 0ustar00rootroot00000000000000/* PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ /*------------------------------------------------------------------------- * * getaddrinfo.c * Support getaddrinfo() on platforms that don't have it. * * We also supply getnameinfo() here, assuming that the platform will have * it if and only if it has getaddrinfo(). If this proves false on some * platform, we'll need to split this file and provide a separate configure * test for getnameinfo(). * * Copyright (c) 2003-2007, PostgreSQL Global Development Group * * Copyright (C) 2007 Jeremy Allison. * Modified to return multiple IPv4 addresses for Samba. * *------------------------------------------------------------------------- */ #include "replace.h" #include "system/network.h" #ifndef SMB_MALLOC #define SMB_MALLOC(s) malloc(s) #endif #ifndef SMB_STRDUP #define SMB_STRDUP(s) strdup(s) #endif static int check_hostent_err(struct hostent *hp) { if (!hp) { switch (h_errno) { case HOST_NOT_FOUND: case NO_DATA: return EAI_NONAME; case TRY_AGAIN: return EAI_AGAIN; case NO_RECOVERY: default: return EAI_FAIL; } } if (!hp->h_name || hp->h_addrtype != AF_INET) { return EAI_FAIL; } return 0; } static char *canon_name_from_hostent(struct hostent *hp, int *perr) { char *ret = NULL; *perr = check_hostent_err(hp); if (*perr) { return NULL; } ret = SMB_STRDUP(hp->h_name); if (!ret) { *perr = EAI_MEMORY; } return ret; } static char *get_my_canon_name(int *perr) { char name[HOST_NAME_MAX+1]; if (gethostname(name, HOST_NAME_MAX) == -1) { *perr = EAI_FAIL; return NULL; } /* Ensure null termination. */ name[HOST_NAME_MAX] = '\0'; return canon_name_from_hostent(gethostbyname(name), perr); } static char *get_canon_name_from_addr(struct in_addr ip, int *perr) { return canon_name_from_hostent( gethostbyaddr(&ip, sizeof(ip), AF_INET), perr); } static struct addrinfo *alloc_entry(const struct addrinfo *hints, struct in_addr ip, unsigned short port) { struct sockaddr_in *psin = NULL; struct addrinfo *ai = SMB_MALLOC(sizeof(*ai)); if (!ai) { return NULL; } memset(ai, '\0', sizeof(*ai)); psin = SMB_MALLOC(sizeof(*psin)); if (!psin) { free(ai); return NULL; } memset(psin, '\0', sizeof(*psin)); psin->sin_family = AF_INET; psin->sin_port = htons(port); psin->sin_addr = ip; ai->ai_flags = 0; ai->ai_family = AF_INET; ai->ai_socktype = hints->ai_socktype; ai->ai_protocol = hints->ai_protocol; ai->ai_addrlen = sizeof(*psin); ai->ai_addr = (struct sockaddr *) psin; ai->ai_canonname = NULL; ai->ai_next = NULL; return ai; } /* * get address info for a single ipv4 address. * * Bugs: - servname can only be a number, not text. */ static int getaddr_info_single_addr(const char *service, uint32_t addr, const struct addrinfo *hints, struct addrinfo **res) { struct addrinfo *ai = NULL; struct in_addr ip; unsigned short port = 0; if (service) { port = (unsigned short)atoi(service); } ip.s_addr = htonl(addr); ai = alloc_entry(hints, ip, port); if (!ai) { return EAI_MEMORY; } /* If we're asked for the canonical name, * make sure it returns correctly. */ if (!(hints->ai_flags & AI_NUMERICSERV) && hints->ai_flags & AI_CANONNAME) { int err; if (addr == INADDR_LOOPBACK || addr == INADDR_ANY) { ai->ai_canonname = get_my_canon_name(&err); } else { ai->ai_canonname = get_canon_name_from_addr(ip,&err); } if (ai->ai_canonname == NULL) { freeaddrinfo(ai); return err; } } *res = ai; return 0; } /* * get address info for multiple ipv4 addresses. 
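 * Each IPv4 address returned by gethostbyname() becomes one entry in
 * the returned addrinfo list; the canonical name is attached to the
 * first entry only.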
* * Bugs: - servname can only be a number, not text. */ static int getaddr_info_name(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { struct addrinfo *listp = NULL, *prevp = NULL; char **pptr = NULL; int err; struct hostent *hp = NULL; unsigned short port = 0; if (service) { port = (unsigned short)atoi(service); } hp = gethostbyname(node); err = check_hostent_err(hp); if (err) { return err; } for(pptr = hp->h_addr_list; *pptr; pptr++) { struct in_addr ip = *(struct in_addr *)*pptr; struct addrinfo *ai = alloc_entry(hints, ip, port); if (!ai) { freeaddrinfo(listp); return EAI_MEMORY; } if (!listp) { listp = ai; prevp = ai; ai->ai_canonname = SMB_STRDUP(hp->h_name); if (!ai->ai_canonname) { freeaddrinfo(listp); return EAI_MEMORY; } } else { prevp->ai_next = ai; prevp = ai; } } *res = listp; return 0; } /* * get address info for ipv4 sockets. * * Bugs: - servname can only be a number, not text. */ int rep_getaddrinfo(const char *node, const char *service, const struct addrinfo * hintp, struct addrinfo ** res) { struct addrinfo hints; /* Setup the hints struct. */ if (hintp == NULL) { memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; } else { memcpy(&hints, hintp, sizeof(hints)); } if (hints.ai_family != AF_INET && hints.ai_family != AF_UNSPEC) { return EAI_FAMILY; } if (hints.ai_socktype == 0) { hints.ai_socktype = SOCK_STREAM; } if (!node && !service) { return EAI_NONAME; } if (node) { if (node[0] == '\0') { return getaddr_info_single_addr(service, INADDR_ANY, &hints, res); } else if (hints.ai_flags & AI_NUMERICHOST) { struct in_addr ip; if (!inet_aton(node, &ip)) { return EAI_FAIL; } return getaddr_info_single_addr(service, ntohl(ip.s_addr), &hints, res); } else { return getaddr_info_name(node, service, &hints, res); } } else if (hints.ai_flags & AI_PASSIVE) { return getaddr_info_single_addr(service, INADDR_ANY, &hints, res); } return getaddr_info_single_addr(service, INADDR_LOOPBACK, &hints, res); } void rep_freeaddrinfo(struct addrinfo *res) { struct addrinfo *next = NULL; for (;res; res = next) { next = res->ai_next; free(res->ai_canonname); free(res->ai_addr); free(res); } } const char *rep_gai_strerror(int errcode) { #ifdef HAVE_HSTRERROR int hcode; switch (errcode) { case EAI_NONAME: hcode = HOST_NOT_FOUND; break; case EAI_AGAIN: hcode = TRY_AGAIN; break; case EAI_FAIL: default: hcode = NO_RECOVERY; break; } return hstrerror(hcode); #else /* !HAVE_HSTRERROR */ switch (errcode) { case EAI_NONAME: return "Unknown host"; case EAI_AGAIN: return "Host name lookup failure"; #ifdef EAI_BADFLAGS case EAI_BADFLAGS: return "Invalid argument"; #endif #ifdef EAI_FAMILY case EAI_FAMILY: return "Address family not supported"; #endif #ifdef EAI_MEMORY case EAI_MEMORY: return "Not enough memory"; #endif #ifdef EAI_NODATA case EAI_NODATA: return "No host data of that type was found"; #endif #ifdef EAI_SERVICE case EAI_SERVICE: return "Class type not found"; #endif #ifdef EAI_SOCKTYPE case EAI_SOCKTYPE: return "Socket type not supported"; #endif default: return "Unknown server error"; } #endif /* HAVE_HSTRERROR */ } static int gethostnameinfo(const struct sockaddr *sa, char *node, size_t nodelen, int flags) { int ret = -1; char *p = NULL; if (!(flags & NI_NUMERICHOST)) { struct hostent *hp = gethostbyaddr( &((struct sockaddr_in *)sa)->sin_addr, sizeof(struct in_addr), sa->sa_family); ret = check_hostent_err(hp); if (ret == 0) { /* Name looked up successfully. 
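 * Copy it into the caller's buffer; when NI_NOFQDN is set the name is
 * truncated at the first dot just below.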
*/ ret = snprintf(node, nodelen, "%s", hp->h_name); if (ret < 0 || (size_t)ret >= nodelen) { return EAI_MEMORY; } if (flags & NI_NOFQDN) { p = strchr(node,'.'); if (p) { *p = '\0'; } } return 0; } if (flags & NI_NAMEREQD) { /* If we require a name and didn't get one, * automatically fail. */ return ret; } /* Otherwise just fall into the numeric host code... */ } p = inet_ntoa(((struct sockaddr_in *)sa)->sin_addr); ret = snprintf(node, nodelen, "%s", p); if (ret < 0 || (size_t)ret >= nodelen) { return EAI_MEMORY; } return 0; } static int getservicenameinfo(const struct sockaddr *sa, char *service, size_t servicelen, int flags) { int ret = -1; int port = ntohs(((struct sockaddr_in *)sa)->sin_port); if (!(flags & NI_NUMERICSERV)) { struct servent *se = getservbyport( port, (flags & NI_DGRAM) ? "udp" : "tcp"); if (se && se->s_name) { /* Service name looked up successfully. */ ret = snprintf(service, servicelen, "%s", se->s_name); if (ret < 0 || (size_t)ret >= servicelen) { return EAI_MEMORY; } return 0; } /* Otherwise just fall into the numeric service code... */ } ret = snprintf(service, servicelen, "%d", port); if (ret < 0 || (size_t)ret >= servicelen) { return EAI_MEMORY; } return 0; } /* * Convert an ipv4 address to a hostname. * * Bugs: - No IPv6 support. */ int rep_getnameinfo(const struct sockaddr *sa, socklen_t salen, char *node, size_t nodelen, char *service, size_t servicelen, int flags) { /* Invalid arguments. */ if (sa == NULL || (node == NULL && service == NULL)) { return EAI_FAIL; } if (sa->sa_family != AF_INET) { return EAI_FAIL; } if (salen < sizeof(struct sockaddr_in)) { return EAI_FAIL; } if (node) { return gethostnameinfo(sa, node, nodelen, flags); } if (service) { return getservicenameinfo(sa, service, servicelen, flags); } return 0; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/getaddrinfo.h0000660000000000000000000000614100000000000017655 0ustar00rootroot00000000000000/* PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ /*------------------------------------------------------------------------- * * getaddrinfo.h * Support getaddrinfo() on platforms that don't have it. * * Note: we use our own routines on platforms that don't HAVE_STRUCT_ADDRINFO, * whether or not the library routine getaddrinfo() can be found. 
This * policy is needed because on some platforms a manually installed libbind.a * may provide getaddrinfo(), yet the system headers may not provide the * struct definitions needed to call it. To avoid conflict with the libbind * definition in such cases, we rename our routines to pg_xxx() via macros. * in lib/replace we use rep_xxx() * This code will also work on platforms where struct addrinfo is defined * in the system headers but no getaddrinfo() can be located. * * Copyright (c) 2003-2007, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ #ifndef GETADDRINFO_H #define GETADDRINFO_H #ifndef HAVE_GETADDRINFO /* Rename private copies per comments above */ #ifdef getaddrinfo #undef getaddrinfo #endif #define getaddrinfo rep_getaddrinfo #define HAVE_GETADDRINFO #ifdef freeaddrinfo #undef freeaddrinfo #endif #define freeaddrinfo rep_freeaddrinfo #define HAVE_FREEADDRINFO #ifdef gai_strerror #undef gai_strerror #endif #define gai_strerror rep_gai_strerror #define HAVE_GAI_STRERROR #ifdef getnameinfo #undef getnameinfo #endif #define getnameinfo rep_getnameinfo #ifndef HAVE_GETNAMEINFO #define HAVE_GETNAMEINFO #endif extern int rep_getaddrinfo(const char *node, const char *service, const struct addrinfo * hints, struct addrinfo ** res); extern void rep_freeaddrinfo(struct addrinfo * res); extern const char *rep_gai_strerror(int errcode); extern int rep_getnameinfo(const struct sockaddr * sa, socklen_t salen, char *node, size_t nodelen, char *service, size_t servicelen, int flags); #endif /* HAVE_GETADDRINFO */ #endif /* GETADDRINFO_H */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/getifaddrs.c0000660000000000000000000002046000000000000017476 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Samba utility functions Copyright (C) Andrew Tridgell 1998 Copyright (C) Jeremy Allison 2007 Copyright (C) Jelmer Vernooij 2007 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "system/network.h" #include #include #include #ifdef HAVE_SYS_TIME_H #include #endif #ifndef SIOCGIFCONF #ifdef HAVE_SYS_SOCKIO_H #include #endif #endif #ifdef HAVE_IFACE_GETIFADDRS #define _FOUND_IFACE_ANY #else void rep_freeifaddrs(struct ifaddrs *ifp) { if (ifp != NULL) { free(ifp->ifa_name); free(ifp->ifa_addr); free(ifp->ifa_netmask); free(ifp->ifa_dstaddr); freeifaddrs(ifp->ifa_next); free(ifp); } } static struct sockaddr *sockaddr_dup(struct sockaddr *sa) { struct sockaddr *ret; socklen_t socklen; #ifdef HAVE_SOCKADDR_SA_LEN socklen = sa->sa_len; #else socklen = sizeof(struct sockaddr_storage); #endif ret = calloc(1, socklen); if (ret == NULL) return NULL; memcpy(ret, sa, socklen); return ret; } #endif #ifdef HAVE_IFACE_IFCONF /* this works for Linux 2.2, Solaris 2.5, SunOS4, HPUX 10.20, OSF1 V4.0, Ultrix 4.4, SCO Unix 3.2, IRIX 6.4 and FreeBSD 3.2. It probably also works on any BSD style system. */ int rep_getifaddrs(struct ifaddrs **ifap) { struct ifconf ifc; char buff[8192]; int fd, i, n; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } ifc.ifc_len = sizeof(buff); ifc.ifc_buf = buff; if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) { close(fd); return -1; } ifr = ifc.ifc_req; n = ifc.ifc_len / sizeof(struct ifreq); /* Loop through interfaces, looking for given IP address */ for (i=n-1; i>=0; i--) { if (ioctl(fd, SIOCGIFFLAGS, &ifr[i]) == -1) { freeifaddrs(*ifap); close(fd); return -1; } curif = calloc(1, sizeof(struct ifaddrs)); if (curif == NULL) { freeifaddrs(*ifap); close(fd); return -1; } curif->ifa_name = strdup(ifr[i].ifr_name); if (curif->ifa_name == NULL) { free(curif); freeifaddrs(*ifap); close(fd); return -1; } curif->ifa_flags = ifr[i].ifr_flags; curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_next = NULL; curif->ifa_addr = NULL; if (ioctl(fd, SIOCGIFADDR, &ifr[i]) != -1) { curif->ifa_addr = sockaddr_dup(&ifr[i].ifr_addr); if (curif->ifa_addr == NULL) { free(curif->ifa_name); free(curif); freeifaddrs(*ifap); close(fd); return -1; } } curif->ifa_netmask = NULL; if (ioctl(fd, SIOCGIFNETMASK, &ifr[i]) != -1) { curif->ifa_netmask = sockaddr_dup(&ifr[i].ifr_addr); if (curif->ifa_netmask == NULL) { if (curif->ifa_addr != NULL) { free(curif->ifa_addr); } free(curif->ifa_name); free(curif); freeifaddrs(*ifap); close(fd); return -1; } } if (lastif == NULL) { *ifap = curif; } else { lastif->ifa_next = curif; } lastif = curif; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_IFCONF */ #ifdef HAVE_IFACE_IFREQ #ifndef I_STR #include #endif /**************************************************************************** this should cover most of the streams based systems Thanks to Andrej.Borsenkow@mow.siemens.ru for several ideas in this code ****************************************************************************/ int rep_getifaddrs(struct ifaddrs **ifap) { struct ifreq ifreq; struct strioctl strioctl; char buff[8192]; int fd, i, n; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } strioctl.ic_cmd = SIOCGIFCONF; strioctl.ic_dp = buff; strioctl.ic_len = sizeof(buff); if (ioctl(fd, I_STR, &strioctl) < 0) { close(fd); return -1; } /* we can ignore the possible sizeof(int) here as the resulting number of interface structures won't change */ n = strioctl.ic_len / sizeof(struct ifreq); /* we will assume that the kernel 
returns the length as an int at the start of the buffer if the offered size is a multiple of the structure size plus an int */ if (n*sizeof(struct ifreq) + sizeof(int) == strioctl.ic_len) { ifr = (struct ifreq *)(buff + sizeof(int)); } else { ifr = (struct ifreq *)buff; } /* Loop through interfaces */ for (i = 0; iifa_next = curif; } strioctl.ic_cmd = SIOCGIFFLAGS; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_flags = ifreq.ifr_flags; strioctl.ic_cmd = SIOCGIFADDR; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_name = strdup(ifreq.ifr_name); curif->ifa_addr = sockaddr_dup(&ifreq.ifr_addr); curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_next = NULL; curif->ifa_netmask = NULL; strioctl.ic_cmd = SIOCGIFNETMASK; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_netmask = sockaddr_dup(&ifreq.ifr_addr); lastif = curif; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_IFREQ */ #ifdef HAVE_IFACE_AIX /**************************************************************************** this one is for AIX (tested on 4.2) ****************************************************************************/ int rep_getifaddrs(struct ifaddrs **ifap) { char buff[8192]; int fd, i; struct ifconf ifc; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } ifc.ifc_len = sizeof(buff); ifc.ifc_buf = buff; if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) { close(fd); return -1; } ifr = ifc.ifc_req; /* Loop through interfaces */ i = ifc.ifc_len; while (i > 0) { unsigned int inc; inc = ifr->ifr_addr.sa_len; if (ioctl(fd, SIOCGIFADDR, ifr) != 0) { freeaddrinfo(*ifap); return -1; } curif = calloc(1, sizeof(struct ifaddrs)); if (lastif == NULL) { *ifap = curif; } else { lastif->ifa_next = curif; } curif->ifa_name = strdup(ifr->ifr_name); curif->ifa_addr = sockaddr_dup(&ifr->ifr_addr); curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_netmask = NULL; curif->ifa_next = NULL; if (ioctl(fd, SIOCGIFFLAGS, ifr) != 0) { freeaddrinfo(*ifap); return -1; } curif->ifa_flags = ifr->ifr_flags; if (ioctl(fd, SIOCGIFNETMASK, ifr) != 0) { freeaddrinfo(*ifap); return -1; } curif->ifa_netmask = sockaddr_dup(&ifr->ifr_addr); lastif = curif; next: /* * Patch from Archie Cobbs (archie@whistle.com). The * addresses in the SIOCGIFCONF interface list have a * minimum size. Usually this doesn't matter, but if * your machine has tunnel interfaces, etc. that have * a zero length "link address", this does matter. 
*/ if (inc < sizeof(ifr->ifr_addr)) inc = sizeof(ifr->ifr_addr); inc += IFNAMSIZ; ifr = (struct ifreq*) (((char*) ifr) + inc); i -= inc; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_AIX */ #ifndef _FOUND_IFACE_ANY int rep_getifaddrs(struct ifaddrs **ifap) { errno = ENOSYS; return -1; } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/hdr_replace.h0000660000000000000000000000012400000000000017632 0ustar00rootroot00000000000000/* this is a replacement header for a missing system header */ #include "replace.h" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/inet_aton.c0000660000000000000000000000223200000000000017337 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * replacement functions * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" /** * We know that we have inet_pton from earlier libreplace checks. */ int rep_inet_aton(const char *src, struct in_addr *dst) { return (inet_pton(AF_INET, src, dst) > 0) ? 1 : 0; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/inet_ntoa.c0000660000000000000000000000247500000000000017350 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * replacement routines for broken systems * Copyright (C) Andrew Tridgell 2003 * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" /** * NOTE: this is not thread safe, but it can't be, either * since it returns a pointer to static memory. 
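 * Callers therefore have to copy or print the result before the next
 * call to inet_ntoa() reuses the same static buffer.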
*/ char *rep_inet_ntoa(struct in_addr ip) { uint8_t *p = (uint8_t *)&ip.s_addr; static char buf[18]; slprintf(buf, 17, "%d.%d.%d.%d", (int)p[0], (int)p[1], (int)p[2], (int)p[3]); return buf; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/inet_ntop.c0000660000000000000000000001163200000000000017362 0ustar00rootroot00000000000000/* * Copyright (C) 1996-2001 Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "replace.h" #include "system/network.h" #define NS_INT16SZ 2 #define NS_IN6ADDRSZ 16 /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static const char *inet_ntop4(const unsigned char *src, char *dst, socklen_t size); #ifdef AF_INET6 static const char *inet_ntop6(const unsigned char *src, char *dst, socklen_t size); #endif /* char * * isc_net_ntop(af, src, dst, size) * convert a network format address to presentation format. * return: * pointer to presentation format address (`dst'), or NULL (see errno). * author: * Paul Vixie, 1996. */ const char * rep_inet_ntop(int af, const void *src, char *dst, socklen_t size) { switch (af) { case AF_INET: return (inet_ntop4(src, dst, size)); #ifdef AF_INET6 case AF_INET6: return (inet_ntop6(src, dst, size)); #endif default: errno = EAFNOSUPPORT; return (NULL); } /* NOTREACHED */ } /* const char * * inet_ntop4(src, dst, size) * format an IPv4 address * return: * `dst' (as a const) * notes: * (1) uses no statics * (2) takes a unsigned char* not an in_addr as input * author: * Paul Vixie, 1996. */ static const char * inet_ntop4(const unsigned char *src, char *dst, socklen_t size) { static const char *fmt = "%u.%u.%u.%u"; char tmp[sizeof "255.255.255.255"]; size_t len; len = snprintf(tmp, sizeof tmp, fmt, src[0], src[1], src[2], src[3]); if (len >= size) { errno = ENOSPC; return (NULL); } memcpy(dst, tmp, len + 1); return (dst); } /* const char * * isc_inet_ntop6(src, dst, size) * convert IPv6 binary address into presentation (printable) format * author: * Paul Vixie, 1996. */ #ifdef AF_INET6 static const char * inet_ntop6(const unsigned char *src, char *dst, socklen_t size) { /* * Note that int32_t and int16_t need only be "at least" large enough * to contain a value of the specified size. On some systems, like * Crays, there is no such thing as an integer variable with 16 bits. * Keep this in mind if you think this function should have been coded * to use pointer overlays. All the world's not a VAX. 
*/ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; struct { int base, len; } best, cur; unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ]; int i, inc; /* * Preprocess: * Copy the input (bytewise) array into a wordwise array. * Find the longest run of 0x00's in src[] for :: shorthanding. */ memset(words, '\0', sizeof words); for (i = 0; i < NS_IN6ADDRSZ; i++) words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); best.base = -1; best.len = 0; cur.base = -1; cur.len = 0; for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { if (words[i] == 0) { if (cur.base == -1) cur.base = i, cur.len = 1; else cur.len++; } else { if (cur.base != -1) { if (best.base == -1 || cur.len > best.len) best = cur; cur.base = -1; } } } if (cur.base != -1) { if (best.base == -1 || cur.len > best.len) best = cur; } if (best.base != -1 && best.len < 2) best.base = -1; /* * Format the result. */ tp = tmp; for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { /* Are we inside the best run of 0x00's? */ if (best.base != -1 && i >= best.base && i < (best.base + best.len)) { if (i == best.base) *tp++ = ':'; continue; } /* Are we following an initial run of 0x00s or any real hex? */ if (i != 0) *tp++ = ':'; /* Is this address an encapsulated IPv4? */ if (i == 6 && best.base == 0 && (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) { if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) return (NULL); tp += strlen(tp); break; } inc = snprintf(tp, 5, "%x", words[i]); if (inc >= 5) { abort(); } tp += inc; } /* Was it a trailing run of 0x00's? */ if (best.base != -1 && (best.base + best.len) == (NS_IN6ADDRSZ / NS_INT16SZ)) *tp++ = ':'; *tp++ = '\0'; /* * Check for overflow, copy, and we're done. */ if ((size_t)(tp - tmp) > size) { errno = ENOSPC; return (NULL); } memcpy(dst, tmp, tp - tmp); return (dst); } #endif /* AF_INET6 */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/inet_pton.c0000660000000000000000000001201000000000000017351 0ustar00rootroot00000000000000/* * Copyright (C) 1996-2001 Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "replace.h" #include "system/network.h" #define NS_INT16SZ 2 #define NS_INADDRSZ 4 #define NS_IN6ADDRSZ 16 /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static int inet_pton4(const char *src, unsigned char *dst); #ifdef AF_INET6 static int inet_pton6(const char *src, unsigned char *dst); #endif /* int * inet_pton(af, src, dst) * convert from presentation format (which usually means ASCII printable) * to network format (which is usually some kind of binary format). 
* return: * 1 if the address was valid for the specified address family * 0 if the address wasn't valid (`dst' is untouched in this case) * -1 if some other error occurred (`dst' is untouched in this case, too) * author: * Paul Vixie, 1996. */ int rep_inet_pton(int af, const char *src, void *dst) { switch (af) { case AF_INET: return (inet_pton4(src, dst)); #ifdef AF_INET6 case AF_INET6: return (inet_pton6(src, dst)); #endif default: errno = EAFNOSUPPORT; return (-1); } /* NOTREACHED */ } /* int * inet_pton4(src, dst) * like inet_aton() but without all the hexadecimal and shorthand. * return: * 1 if `src' is a valid dotted quad, else 0. * notice: * does not touch `dst' unless it's returning 1. * author: * Paul Vixie, 1996. */ static int inet_pton4(src, dst) const char *src; unsigned char *dst; { static const char digits[] = "0123456789"; int saw_digit, octets, ch; unsigned char tmp[NS_INADDRSZ], *tp; saw_digit = 0; octets = 0; *(tp = tmp) = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr(digits, ch)) != NULL) { unsigned int new = *tp * 10 + (pch - digits); if (new > 255) return (0); *tp = new; if (! saw_digit) { if (++octets > 4) return (0); saw_digit = 1; } } else if (ch == '.' && saw_digit) { if (octets == 4) return (0); *++tp = 0; saw_digit = 0; } else return (0); } if (octets < 4) return (0); memcpy(dst, tmp, NS_INADDRSZ); return (1); } /* int * inet_pton6(src, dst) * convert presentation level address to network order binary form. * return: * 1 if `src' is a valid [RFC1884 2.2] address, else 0. * notice: * (1) does not touch `dst' unless it's returning 1. * (2) :: in a full address is silently ignored. * credit: * inspired by Mark Andrews. * author: * Paul Vixie, 1996. */ #ifdef AF_INET6 static int inet_pton6(src, dst) const char *src; unsigned char *dst; { static const char xdigits_l[] = "0123456789abcdef", xdigits_u[] = "0123456789ABCDEF"; unsigned char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp; const char *xdigits, *curtok; int ch, saw_xdigit; unsigned int val; memset((tp = tmp), '\0', NS_IN6ADDRSZ); endp = tp + NS_IN6ADDRSZ; colonp = NULL; /* Leading :: requires some special handling. */ if (*src == ':') if (*++src != ':') return (0); curtok = src; saw_xdigit = 0; val = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) pch = strchr((xdigits = xdigits_u), ch); if (pch != NULL) { val <<= 4; val |= (pch - xdigits); if (val > 0xffff) return (0); saw_xdigit = 1; continue; } if (ch == ':') { curtok = src; if (!saw_xdigit) { if (colonp) return (0); colonp = tp; continue; } if (tp + NS_INT16SZ > endp) return (0); *tp++ = (unsigned char) (val >> 8) & 0xff; *tp++ = (unsigned char) val & 0xff; saw_xdigit = 0; val = 0; continue; } if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) && inet_pton4(curtok, tp) > 0) { tp += NS_INADDRSZ; saw_xdigit = 0; break; /* '\0' was seen by inet_pton4(). */ } return (0); } if (saw_xdigit) { if (tp + NS_INT16SZ > endp) return (0); *tp++ = (unsigned char) (val >> 8) & 0xff; *tp++ = (unsigned char) val & 0xff; } if (colonp != NULL) { /* * Since some memmove()'s erroneously fail to handle * overlapping regions, we'll do the shift by hand. 
*/ const int n = tp - colonp; int i; for (i = 1; i <= n; i++) { endp[- i] = colonp[n - i]; colonp[n - i] = 0; } tp = endp; } if (tp != endp) return (0); memcpy(dst, tmp, NS_IN6ADDRSZ); return (1); } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/poll.c0000660000000000000000000000640500000000000016333 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. poll.c - poll wrapper This file is based on code from libssh (LGPLv2.1+ at the time it was downloaded), thus the following copyrights: Copyright (c) 2009-2010 by Andreas Schneider Copyright (c) 2003-2009 by Aris Adamantiadis Copyright (c) 2009 Aleksandar Kanchev Copyright (C) Volker Lendecke 2011 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/select.h" #ifdef HAVE_SYS_TIME_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout) { fd_set rfds, wfds, efds; struct timeval tv, *ptv; int max_fd; int rc; nfds_t i; if ((fds == NULL) && (nfds != 0)) { errno = EFAULT; return -1; } FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&efds); rc = 0; max_fd = 0; /* compute fd_sets and find largest descriptor */ for (i = 0; i < nfds; i++) { if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { fds[i].revents = POLLNVAL; continue; } if (fds[i].events & (POLLIN | POLLRDNORM)) { FD_SET(fds[i].fd, &rfds); } if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) { FD_SET(fds[i].fd, &wfds); } if (fds[i].events & (POLLPRI | POLLRDBAND)) { FD_SET(fds[i].fd, &efds); } if (fds[i].fd > max_fd && (fds[i].events & (POLLIN | POLLOUT | POLLPRI | POLLRDNORM | POLLRDBAND | POLLWRNORM | POLLWRBAND))) { max_fd = fds[i].fd; } } if (timeout < 0) { ptv = NULL; } else { ptv = &tv; if (timeout == 0) { tv.tv_sec = 0; tv.tv_usec = 0; } else { tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; } } rc = select(max_fd + 1, &rfds, &wfds, &efds, ptv); if (rc < 0) { return -1; } for (rc = 0, i = 0; i < nfds; i++) { if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { continue; } fds[i].revents = 0; if (FD_ISSET(fds[i].fd, &rfds)) { int err = errno; int available = 0; int ret; /* support for POLLHUP */ ret = ioctl(fds[i].fd, FIONREAD, &available); if ((ret == -1) || (available == 0)) { fds[i].revents |= POLLHUP; } else { fds[i].revents |= fds[i].events & (POLLIN | POLLRDNORM); } errno = err; } if (FD_ISSET(fds[i].fd, &wfds)) { fds[i].revents |= fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND); } if (FD_ISSET(fds[i].fd, &efds)) { fds[i].revents |= fds[i].events & (POLLPRI | POLLRDBAND); } if (fds[i].revents & ~POLLHUP) { rc++; } } return rc; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 
tevent-0.11.0/lib/replace/replace-test.h0000660000000000000000000000033700000000000017760 0ustar00rootroot00000000000000#ifndef __LIB_REPLACE_REPLACE_TEST_H__ #define __LIB_REPLACE_REPLACE_TEST_H__ int libreplace_test_strptime(void); int test_readdir_os2_delete(void); int getifaddrs_test(void); #endif /* __LIB_REPLACE_REPLACE_TEST_H__ */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/replace-testsuite.h0000660000000000000000000000036300000000000021031 0ustar00rootroot00000000000000#ifndef __LIB_REPLACE_REPLACE_TESTSUITE_H__ #define __LIB_REPLACE_REPLACE_TESTSUITE_H__ #include struct torture_context; bool torture_local_replace(struct torture_context *ctx); #endif /* __LIB_REPLACE_REPLACE_TESTSUITE_H__ */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/replace/replace.c0000660000000000000000000005352500000000000017005 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. replacement routines for broken systems Copyright (C) Andrew Tridgell 1992-1998 Copyright (C) Jelmer Vernooij 2005-2008 Copyright (C) Matthieu Patou 2010 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/time.h" #include "system/network.h" #include "system/passwd.h" #include "system/syslog.h" #include "system/locale.h" #include "system/wait.h" #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif void replace_dummy(void); void replace_dummy(void) {} #ifndef HAVE_FTRUNCATE /******************************************************************* ftruncate for operating systems that don't have it ********************************************************************/ int rep_ftruncate(int f, off_t l) { #ifdef HAVE_CHSIZE return chsize(f,l); #elif defined(F_FREESP) struct flock fl; fl.l_whence = 0; fl.l_len = 0; fl.l_start = l; fl.l_type = F_WRLCK; return fcntl(f, F_FREESP, &fl); #else #error "you must have a ftruncate function" #endif } #endif /* HAVE_FTRUNCATE */ #ifndef HAVE_STRLCPY /* * Like strncpy but does not 0 fill the buffer and always null * terminates. bufsize is the size of the destination buffer. * Returns the length of s. */ size_t rep_strlcpy(char *d, const char *s, size_t bufsize) { size_t len = strlen(s); size_t ret = len; if (bufsize <= 0) { return 0; } if (len >= bufsize) { len = bufsize - 1; } memcpy(d, s, len); d[len] = 0; return ret; } #endif #ifndef HAVE_STRLCAT /* like strncat but does not 0 fill the buffer and always null terminates. 
bufsize is the length of the buffer, which should be one more than the maximum resulting string length */ size_t rep_strlcat(char *d, const char *s, size_t bufsize) { size_t len1 = strnlen(d, bufsize); size_t len2 = strlen(s); size_t ret = len1 + len2; if (len1+len2 >= bufsize) { if (bufsize < (len1+1)) { return ret; } len2 = bufsize - (len1+1); } if (len2 > 0) { memcpy(d+len1, s, len2); d[len1+len2] = 0; } return ret; } #endif #ifndef HAVE_MKTIME /******************************************************************* a mktime() replacement for those who don't have it - contributed by C.A. Lademann Corrections by richard.kettlewell@kewill.com ********************************************************************/ #define MINUTE 60 #define HOUR 60*MINUTE #define DAY 24*HOUR #define YEAR 365*DAY time_t rep_mktime(struct tm *t) { struct tm *u; time_t epoch = 0; int n; int mon [] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, y, m, i; if(t->tm_year < 70) return((time_t)-1); n = t->tm_year + 1900 - 1; epoch = (t->tm_year - 70) * YEAR + ((n / 4 - n / 100 + n / 400) - (1969 / 4 - 1969 / 100 + 1969 / 400)) * DAY; y = t->tm_year + 1900; m = 0; for(i = 0; i < t->tm_mon; i++) { epoch += mon [m] * DAY; if(m == 1 && y % 4 == 0 && (y % 100 != 0 || y % 400 == 0)) epoch += DAY; if(++m > 11) { m = 0; y++; } } epoch += (t->tm_mday - 1) * DAY; epoch += t->tm_hour * HOUR + t->tm_min * MINUTE + t->tm_sec; if((u = localtime(&epoch)) != NULL) { t->tm_sec = u->tm_sec; t->tm_min = u->tm_min; t->tm_hour = u->tm_hour; t->tm_mday = u->tm_mday; t->tm_mon = u->tm_mon; t->tm_year = u->tm_year; t->tm_wday = u->tm_wday; t->tm_yday = u->tm_yday; t->tm_isdst = u->tm_isdst; } return(epoch); } #endif /* !HAVE_MKTIME */ #ifndef HAVE_INITGROUPS /**************************************************************************** some systems don't have an initgroups call ****************************************************************************/ int rep_initgroups(char *name, gid_t id) { #ifndef HAVE_SETGROUPS /* yikes! no SETGROUPS or INITGROUPS? how can this work? */ errno = ENOSYS; return -1; #else /* HAVE_SETGROUPS */ #include gid_t *grouplst = NULL; int max_gr = NGROUPS_MAX; int ret; int i,j; struct group *g; char *gr; if((grouplst = malloc(sizeof(gid_t) * max_gr)) == NULL) { errno = ENOMEM; return -1; } grouplst[0] = id; i = 1; while (i < max_gr && ((g = (struct group *)getgrent()) != (struct group *)NULL)) { if (g->gr_gid == id) continue; j = 0; gr = g->gr_mem[0]; while (gr && (*gr != (char)NULL)) { if (strcmp(name,gr) == 0) { grouplst[i] = g->gr_gid; i++; gr = (char *)NULL; break; } gr = g->gr_mem[++j]; } } endgrent(); ret = setgroups(i, grouplst); free(grouplst); return ret; #endif /* HAVE_SETGROUPS */ } #endif /* HAVE_INITGROUPS */ #ifndef HAVE_MEMMOVE /******************************************************************* safely copies memory, ensuring no overlap problems. this is only used if the machine does not have its own memmove(). this is not the fastest algorithm in town, but it will do for our needs. 
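(Editor's note on the approach: it copies int-sized words when the source, destination and length are all word-aligned and the regions are at least a word apart, falls back to plain byte copies otherwise, and copies backwards whenever the destination overlaps the tail of the source.)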
********************************************************************/ void *rep_memmove(void *dest,const void *src,int size) { unsigned long d,s; int i; if (dest==src || !size) return(dest); d = (unsigned long)dest; s = (unsigned long)src; if ((d >= (s+size)) || (s >= (d+size))) { /* no overlap */ memcpy(dest,src,size); return(dest); } if (d < s) { /* we can forward copy */ if (s-d >= sizeof(int) && !(s%sizeof(int)) && !(d%sizeof(int)) && !(size%sizeof(int))) { /* do it all as words */ int *idest = (int *)dest; int *isrc = (int *)src; size /= sizeof(int); for (i=0;i= sizeof(int) && !(s%sizeof(int)) && !(d%sizeof(int)) && !(size%sizeof(int))) { /* do it all as words */ int *idest = (int *)dest; int *isrc = (int *)src; size /= sizeof(int); for (i=size-1;i>=0;i--) idest[i] = isrc[i]; } else { /* simplest */ char *cdest = (char *)dest; char *csrc = (char *)src; for (i=size-1;i>=0;i--) cdest[i] = csrc[i]; } } return(dest); } #endif /* HAVE_MEMMOVE */ #ifndef HAVE_STRDUP /**************************************************************************** duplicate a string ****************************************************************************/ char *rep_strdup(const char *s) { size_t len; char *ret; if (!s) return(NULL); len = strlen(s)+1; ret = (char *)malloc(len); if (!ret) return(NULL); memcpy(ret,s,len); return(ret); } #endif /* HAVE_STRDUP */ #ifndef HAVE_SETLINEBUF void rep_setlinebuf(FILE *stream) { setvbuf(stream, (char *)NULL, _IOLBF, 0); } #endif /* HAVE_SETLINEBUF */ #ifndef HAVE_VSYSLOG #ifdef HAVE_SYSLOG void rep_vsyslog (int facility_priority, const char *format, va_list arglist) { char *msg = NULL; vasprintf(&msg, format, arglist); if (!msg) return; syslog(facility_priority, "%s", msg); free(msg); } #endif /* HAVE_SYSLOG */ #endif /* HAVE_VSYSLOG */ #ifndef HAVE_STRNLEN /** Some platforms don't have strnlen **/ size_t rep_strnlen(const char *s, size_t max) { size_t len; for (len = 0; len < max; len++) { if (s[len] == '\0') { break; } } return len; } #endif #ifndef HAVE_STRNDUP /** Some platforms don't have strndup. **/ char *rep_strndup(const char *s, size_t n) { char *ret; n = strnlen(s, n); ret = malloc(n+1); if (!ret) return NULL; memcpy(ret, s, n); ret[n] = 0; return ret; } #endif #if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) int rep_waitpid(pid_t pid,int *status,int options) { return wait4(pid, status, options, NULL); } #endif #ifndef HAVE_SETEUID int rep_seteuid(uid_t euid) { #ifdef HAVE_SETRESUID return setresuid(-1, euid, -1); #else errno = ENOSYS; return -1; #endif } #endif #ifndef HAVE_SETEGID int rep_setegid(gid_t egid) { #ifdef HAVE_SETRESGID return setresgid(-1, egid, -1); #else errno = ENOSYS; return -1; #endif } #endif /******************************************************************* os/2 also doesn't have chroot ********************************************************************/ #ifndef HAVE_CHROOT int rep_chroot(const char *dname) { errno = ENOSYS; return -1; } #endif /***************************************************************** Possibly replace mkstemp if it is broken. *****************************************************************/ #ifndef HAVE_SECURE_MKSTEMP int rep_mkstemp(char *template) { /* have a reasonable go at emulating it. 
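The emulation asks mktemp() for a candidate name and then opens it with O_CREAT|O_EXCL|O_RDWR and mode 0600, so if the name has been raced or already exists the open fails rather than silently reusing the file.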
Hope that the system mktemp() isn't completely hopeless */ mktemp(template); if (template[0] == 0) return -1; return open(template, O_CREAT|O_EXCL|O_RDWR, 0600); } #endif #ifndef HAVE_MKDTEMP char *rep_mkdtemp(char *template) { char *dname; if ((dname = mktemp(template))) { if (mkdir(dname, 0700) >= 0) { return dname; } } return NULL; } #endif /***************************************************************** Watch out: this is not thread safe. *****************************************************************/ #ifndef HAVE_PREAD ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset) { if (lseek(__fd, __offset, SEEK_SET) != __offset) { return -1; } return read(__fd, __buf, __nbytes); } #endif /***************************************************************** Watch out: this is not thread safe. *****************************************************************/ #ifndef HAVE_PWRITE ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset) { if (lseek(__fd, __offset, SEEK_SET) != __offset) { return -1; } return write(__fd, __buf, __nbytes); } #endif #ifndef HAVE_STRCASESTR char *rep_strcasestr(const char *haystack, const char *needle) { const char *s; size_t nlen = strlen(needle); for (s=haystack;*s;s++) { if (toupper(*needle) == toupper(*s) && strncasecmp(s, needle, nlen) == 0) { return (char *)((uintptr_t)s); } } return NULL; } #endif #ifndef HAVE_STRSEP char *rep_strsep(char **pps, const char *delim) { char *ret = *pps; char *p = *pps; if (p == NULL) { return NULL; } p += strcspn(p, delim); if (*p == '\0') { *pps = NULL; } else { *p = '\0'; *pps = p + 1; } return ret; } #endif #ifndef HAVE_STRTOK_R /* based on GLIBC version, copyright Free Software Foundation */ char *rep_strtok_r(char *s, const char *delim, char **save_ptr) { char *token; if (s == NULL) s = *save_ptr; s += strspn(s, delim); if (*s == '\0') { *save_ptr = s; return NULL; } token = s; s = strpbrk(token, delim); if (s == NULL) { *save_ptr = token + strlen(token); } else { *s = '\0'; *save_ptr = s + 1; } return token; } #endif #ifndef HAVE_STRTOLL long long int rep_strtoll(const char *str, char **endptr, int base) { #ifdef HAVE_STRTOQ return strtoq(str, endptr, base); #elif defined(HAVE___STRTOLL) return __strtoll(str, endptr, base); #elif SIZEOF_LONG == SIZEOF_LONG_LONG return (long long int) strtol(str, endptr, base); #else # error "You need a strtoll function" #endif } #else #ifdef HAVE_BSD_STRTOLL #undef strtoll long long int rep_strtoll(const char *str, char **endptr, int base) { int saved_errno = errno; long long int nb = strtoll(str, endptr, base); /* With glibc EINVAL is only returned if base is not ok */ if (errno == EINVAL) { if (base == 0 || (base >1 && base <37)) { /* Base was ok so it's because we were not * able to make the conversion. * Let's reset errno. 
*/ errno = saved_errno; } } return nb; } #endif /* HAVE_BSD_STRTOLL */ #endif /* HAVE_STRTOLL */ #ifndef HAVE_STRTOULL unsigned long long int rep_strtoull(const char *str, char **endptr, int base) { #ifdef HAVE_STRTOUQ return strtouq(str, endptr, base); #elif defined(HAVE___STRTOULL) return __strtoull(str, endptr, base); #elif SIZEOF_LONG == SIZEOF_LONG_LONG return (unsigned long long int) strtoul(str, endptr, base); #else # error "You need a strtoull function" #endif } #else #ifdef HAVE_BSD_STRTOLL #undef strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base) { int saved_errno = errno; unsigned long long int nb = strtoull(str, endptr, base); /* With glibc EINVAL is only returned if base is not ok */ if (errno == EINVAL) { if (base == 0 || (base >1 && base <37)) { /* Base was ok so it's because we were not * able to make the conversion. * Let's reset errno. */ errno = saved_errno; } } return nb; } #endif /* HAVE_BSD_STRTOLL */ #endif /* HAVE_STRTOULL */ #ifndef HAVE_SETENV int rep_setenv(const char *name, const char *value, int overwrite) { char *p; size_t l1, l2; int ret; if (!overwrite && getenv(name)) { return 0; } l1 = strlen(name); l2 = strlen(value); p = malloc(l1+l2+2); if (p == NULL) { return -1; } memcpy(p, name, l1); p[l1] = '='; memcpy(p+l1+1, value, l2); p[l1+l2+1] = 0; ret = putenv(p); if (ret != 0) { free(p); } return ret; } #endif #ifndef HAVE_UNSETENV int rep_unsetenv(const char *name) { extern char **environ; size_t len = strlen(name); size_t i, count; if (environ == NULL || getenv(name) == NULL) { return 0; } for (i=0;environ[i];i++) /* noop */ ; count=i; for (i=0;i= needlelen) { char *p = (char *)memchr(haystack, *(const char *)needle, haystacklen-(needlelen-1)); if (!p) return NULL; if (memcmp(p, needle, needlelen) == 0) { return p; } haystack = p+1; haystacklen -= (p - (const char *)haystack) + 1; } return NULL; } #endif #if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_vdprintf(int fd, const char *format, va_list ap) { char *s = NULL; int ret; vasprintf(&s, format, ap); if (s == NULL) { errno = ENOMEM; return -1; } ret = write(fd, s, strlen(s)); free(s); return ret; } #endif #if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_dprintf(int fd, const char *format, ...) 
{ int ret; va_list ap; va_start(ap, format); ret = vdprintf(fd, format, ap); va_end(ap); return ret; } #endif #ifndef HAVE_GET_CURRENT_DIR_NAME char *rep_get_current_dir_name(void) { char buf[PATH_MAX+1]; char *p; p = getcwd(buf, sizeof(buf)); if (p == NULL) { return NULL; } return strdup(p); } #endif #ifndef HAVE_STRERROR_R int rep_strerror_r(int errnum, char *buf, size_t buflen) { char *s = strerror(errnum); if (strlen(s)+1 > buflen) { errno = ERANGE; return -1; } strncpy(buf, s, buflen); return 0; } #elif (!defined(STRERROR_R_XSI_NOT_GNU)) #undef strerror_r int rep_strerror_r(int errnum, char *buf, size_t buflen) { char *s = strerror_r(errnum, buf, buflen); if (s == NULL) { /* Shouldn't happen, should always get a string */ return EINVAL; } if (s != buf) { strlcpy(buf, s, buflen); if (strlen(s) > buflen - 1) { return ERANGE; } } return 0; } #endif #ifndef HAVE_CLOCK_GETTIME int rep_clock_gettime(clockid_t clk_id, struct timespec *tp) { struct timeval tval; switch (clk_id) { case 0: /* CLOCK_REALTIME :*/ #if defined(HAVE_GETTIMEOFDAY_TZ) || defined(HAVE_GETTIMEOFDAY_TZ_VOID) gettimeofday(&tval,NULL); #else gettimeofday(&tval); #endif tp->tv_sec = tval.tv_sec; tp->tv_nsec = tval.tv_usec * 1000; break; default: errno = EINVAL; return -1; } return 0; } #endif #ifndef HAVE_MEMALIGN void *rep_memalign( size_t align, size_t size ) { #if defined(HAVE_POSIX_MEMALIGN) void *p = NULL; int ret = posix_memalign( &p, align, size ); if ( ret == 0 ) return p; return NULL; #else /* On *BSD systems memaligns doesn't exist, but memory will * be aligned on allocations of > pagesize. */ #if defined(SYSCONF_SC_PAGESIZE) size_t pagesize = (size_t)sysconf(_SC_PAGESIZE); #elif defined(HAVE_GETPAGESIZE) size_t pagesize = (size_t)getpagesize(); #else size_t pagesize = (size_t)-1; #endif if (pagesize == (size_t)-1) { errno = ENOSYS; return NULL; } if (size < pagesize) { size = pagesize; } return malloc(size); #endif } #endif #ifndef HAVE_GETPEEREID int rep_getpeereid(int s, uid_t *uid, gid_t *gid) { #if defined(HAVE_PEERCRED) struct ucred cred; socklen_t cred_len = sizeof(struct ucred); int ret; #undef getsockopt ret = getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void *)&cred, &cred_len); if (ret != 0) { return -1; } if (cred_len != sizeof(struct ucred)) { errno = EINVAL; return -1; } *uid = cred.uid; *gid = cred.gid; return 0; #else errno = ENOSYS; return -1; #endif } #endif #ifndef HAVE_USLEEP int rep_usleep(useconds_t usec) { struct timeval tval; /* * Fake it with select... */ tval.tv_sec = usec / 1000000; tval.tv_usec = usec % 1000000; select(0,NULL,NULL,NULL,&tval); return 0; } #endif /* HAVE_USLEEP */ #ifndef HAVE_SETPROCTITLE void rep_setproctitle(const char *fmt, ...)
{ } #endif #ifndef HAVE_SETPROCTITLE_INIT void rep_setproctitle_init(int argc, char *argv[], char *envp[]) { } #endif #ifndef HAVE_MEMSET_S # ifndef RSIZE_MAX # define RSIZE_MAX (SIZE_MAX >> 1) # endif int rep_memset_s(void *dest, size_t destsz, int ch, size_t count) { if (dest == NULL) { return EINVAL; } if (destsz > RSIZE_MAX || count > RSIZE_MAX || count > destsz) { return ERANGE; } #if defined(HAVE_MEMSET_EXPLICIT) memset_explicit(dest, destsz, ch, count); #else /* HAVE_MEMSET_EXPLICIT */ memset(dest, ch, count); # if defined(HAVE_GCC_VOLATILE_MEMORY_PROTECTION) /* See http://llvm.org/bugs/show_bug.cgi?id=15495 */ __asm__ volatile("" : : "g"(dest) : "memory"); # endif /* HAVE_GCC_VOLATILE_MEMORY_PROTECTION */ #endif /* HAVE_MEMSET_EXPLICIT */ return 0; } #endif /* HAVE_MEMSET_S */ #ifndef HAVE_GETPROGNAME # ifndef HAVE_PROGRAM_INVOCATION_SHORT_NAME # define PROGNAME_SIZE 32 static char rep_progname[PROGNAME_SIZE]; # endif /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ const char *rep_getprogname(void) { #ifdef HAVE_PROGRAM_INVOCATION_SHORT_NAME return program_invocation_short_name; #else /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ FILE *fp = NULL; char cmdline[4096] = {0}; char *p = NULL; pid_t pid; size_t nread; int len; int rc; if (rep_progname[0] != '\0') { return rep_progname; } len = snprintf(rep_progname, sizeof(rep_progname), "%s", ""); if (len <= 0) { return NULL; } pid = getpid(); if (pid <= 1 || pid == (pid_t)-1) { return NULL; } len = snprintf(cmdline, sizeof(cmdline), "/proc/%u/cmdline", (unsigned int)pid); if (len <= 0 || len == sizeof(cmdline)) { return NULL; } fp = fopen(cmdline, "r"); if (fp == NULL) { return NULL; } nread = fread(cmdline, 1, sizeof(cmdline) - 1, fp); rc = fclose(fp); if (rc != 0) { return NULL; } if (nread == 0) { return NULL; } cmdline[nread] = '\0'; p = strrchr(cmdline, '/'); if (p != NULL) { p++; } else { p = cmdline; } len = strlen(p); if (len > PROGNAME_SIZE) { p[PROGNAME_SIZE - 1] = '\0'; } (void)snprintf(rep_progname, sizeof(rep_progname), "%s", p); return rep_progname; #endif /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ } #endif /* HAVE_GETPROGNAME */ #ifndef HAVE_COPY_FILE_RANGE # ifdef HAVE_SYSCALL_COPY_FILE_RANGE # include # endif ssize_t rep_copy_file_range(int fd_in, loff_t *off_in, int fd_out, loff_t *off_out, size_t len, unsigned int flags) { # ifdef HAVE_SYSCALL_COPY_FILE_RANGE return syscall(__NR_copy_file_range, fd_in, off_in, fd_out, off_out, len, flags); # endif /* HAVE_SYSCALL_COPY_FILE_RANGE */ errno = ENOSYS; return -1; } #endif /* HAVE_COPY_FILE_RANGE */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/replace/replace.h0000660000000000000000000005356400000000000017015 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. macros to go along with the lib/replace/ portability layer code Copyright (C) Andrew Tridgell 2005 Copyright (C) Jelmer Vernooij 2006-2008 Copyright (C) Jeremy Allison 2007. ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef _LIBREPLACE_REPLACE_H #define _LIBREPLACE_REPLACE_H #ifndef NO_CONFIG_H #include "config.h" #endif #ifdef HAVE_STANDARDS_H #include #endif /* * Needs to be defined before std*.h and string*.h are included * As it's also needed when Python.h is the first header we * require a global -D__STDC_WANT_LIB_EXT1__=1 */ #ifndef __STDC_WANT_LIB_EXT1__ #error -D__STDC_WANT_LIB_EXT1__=1 required #endif #include #include #include #include #ifndef HAVE_DECL_EWOULDBLOCK #define EWOULDBLOCK EAGAIN #endif #if defined(_MSC_VER) || defined(__MINGW32__) #include "win32_replace.h" #endif #ifdef HAVE_INTTYPES_H #define __STDC_FORMAT_MACROS #include #elif defined(HAVE_STDINT_H) #include /* force off HAVE_INTTYPES_H so that roken doesn't try to include both, which causes a warning storm on irix */ #undef HAVE_INTTYPES_H #endif #ifdef HAVE_MALLOC_H #include #endif #ifndef __PRI64_PREFIX # if __WORDSIZE == 64 && ! defined __APPLE__ # define __PRI64_PREFIX "l" # else # define __PRI64_PREFIX "ll" # endif #endif /* Decimal notation. */ #ifndef PRId8 # define PRId8 "d" #endif #ifndef PRId16 # define PRId16 "d" #endif #ifndef PRId32 # define PRId32 "d" #endif #ifndef PRId64 # define PRId64 __PRI64_PREFIX "d" #endif #ifndef PRIi8 # define PRIi8 "i" #endif #ifndef PRIi16 # define PRIi16 "i" #endif #ifndef PRIi32 # define PRIi32 "i" #endif #ifndef PRIi64 # define PRIi64 __PRI64_PREFIX "i" #endif #ifndef PRIu8 # define PRIu8 "u" #endif #ifndef PRIu16 # define PRIu16 "u" #endif #ifndef PRIu32 # define PRIu32 "u" #endif #ifndef PRIu64 # define PRIu64 __PRI64_PREFIX "u" #endif #ifndef SCNd8 # define SCNd8 "hhd" #endif #ifndef SCNd16 # define SCNd16 "hd" #endif #ifndef SCNd32 # define SCNd32 "d" #endif #ifndef SCNd64 # define SCNd64 __PRI64_PREFIX "d" #endif #ifndef SCNi8 # define SCNi8 "hhi" #endif #ifndef SCNi16 # define SCNi16 "hi" #endif #ifndef SCNi32 # define SCNi32 "i" #endif #ifndef SCNi64 # define SCNi64 __PRI64_PREFIX "i" #endif #ifndef SCNu8 # define SCNu8 "hhu" #endif #ifndef SCNu16 # define SCNu16 "hu" #endif #ifndef SCNu32 # define SCNu32 "u" #endif #ifndef SCNu64 # define SCNu64 __PRI64_PREFIX "u" #endif #ifdef HAVE_BSD_STRING_H #include #endif #ifdef HAVE_BSD_UNISTD_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_STRINGS_H #include #endif #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_SYSMACROS_H #include #endif #ifdef HAVE_SETPROCTITLE_H #include #endif #if STDC_HEADERS #include #include #endif #ifdef HAVE_LINUX_TYPES_H /* * This is needed as some broken header files require this to be included early */ #include #endif #ifndef HAVE_STRERROR extern char *sys_errlist[]; #define strerror(i) sys_errlist[i] #endif #ifndef HAVE_ERRNO_DECL extern int errno; #endif #ifndef HAVE_STRDUP #define strdup rep_strdup char *rep_strdup(const char *s); #endif #ifndef HAVE_MEMMOVE #define memmove rep_memmove void *rep_memmove(void *dest,const void *src,int size); #endif #ifndef HAVE_MEMMEM #define memmem rep_memmem void *rep_memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen); #endif #ifndef HAVE_MEMALIGN #define memalign rep_memalign void *rep_memalign(size_t boundary, size_t size); #endif #ifndef HAVE_MKTIME #define mktime rep_mktime /* prototype is in "system/time.h" */ #endif #ifndef HAVE_TIMEGM #define timegm rep_timegm /* prototype is in 
"system/time.h" */ #endif #ifndef HAVE_UTIME #define utime rep_utime /* prototype is in "system/time.h" */ #endif #ifndef HAVE_UTIMES #define utimes rep_utimes /* prototype is in "system/time.h" */ #endif #ifndef HAVE_STRLCPY #define strlcpy rep_strlcpy size_t rep_strlcpy(char *d, const char *s, size_t bufsize); #endif #ifndef HAVE_STRLCAT #define strlcat rep_strlcat size_t rep_strlcat(char *d, const char *s, size_t bufsize); #endif #ifndef HAVE_CLOSEFROM #define closefrom rep_closefrom int rep_closefrom(int lower); #endif #if (defined(BROKEN_STRNDUP) || !defined(HAVE_STRNDUP)) #undef HAVE_STRNDUP #define strndup rep_strndup char *rep_strndup(const char *s, size_t n); #endif #if (defined(BROKEN_STRNLEN) || !defined(HAVE_STRNLEN)) #undef HAVE_STRNLEN #define strnlen rep_strnlen size_t rep_strnlen(const char *s, size_t n); #endif #if !defined(HAVE_DECL_ENVIRON) # ifdef __APPLE__ # include # define environ (*_NSGetEnviron()) # else /* __APPLE__ */ extern char **environ; # endif /* __APPLE */ #endif /* !defined(HAVE_DECL_ENVIRON) */ #ifndef HAVE_SETENV #define setenv rep_setenv int rep_setenv(const char *name, const char *value, int overwrite); #else #ifndef HAVE_SETENV_DECL int setenv(const char *name, const char *value, int overwrite); #endif #endif #ifndef HAVE_UNSETENV #define unsetenv rep_unsetenv int rep_unsetenv(const char *name); #endif #ifndef HAVE_SETEUID #define seteuid rep_seteuid int rep_seteuid(uid_t); #endif #ifndef HAVE_SETEGID #define setegid rep_setegid int rep_setegid(gid_t); #endif #if (defined(USE_SETRESUID) && !defined(HAVE_SETRESUID_DECL)) /* stupid glibc */ int setresuid(uid_t ruid, uid_t euid, uid_t suid); #endif #if (defined(USE_SETRESUID) && !defined(HAVE_SETRESGID_DECL)) int setresgid(gid_t rgid, gid_t egid, gid_t sgid); #endif #ifndef HAVE_CHOWN #define chown rep_chown int rep_chown(const char *path, uid_t uid, gid_t gid); #endif #ifndef HAVE_CHROOT #define chroot rep_chroot int rep_chroot(const char *dirname); #endif #ifndef HAVE_LINK #define link rep_link int rep_link(const char *oldpath, const char *newpath); #endif #ifndef HAVE_READLINK #define readlink rep_readlink ssize_t rep_readlink(const char *path, char *buf, size_t bufsize); #endif #ifndef HAVE_SYMLINK #define symlink rep_symlink int rep_symlink(const char *oldpath, const char *newpath); #endif #ifndef HAVE_REALPATH #define realpath rep_realpath char *rep_realpath(const char *path, char *resolved_path); #endif #ifndef HAVE_LCHOWN #define lchown rep_lchown int rep_lchown(const char *fname,uid_t uid,gid_t gid); #endif #ifdef HAVE_UNIX_H #include #endif #ifndef HAVE_SETLINEBUF #define setlinebuf rep_setlinebuf void rep_setlinebuf(FILE *); #endif #ifndef HAVE_STRCASESTR #define strcasestr rep_strcasestr char *rep_strcasestr(const char *haystack, const char *needle); #endif #ifndef HAVE_STRSEP #define strsep rep_strsep char *rep_strsep(char **pps, const char *delim); #endif #ifndef HAVE_STRTOK_R #define strtok_r rep_strtok_r char *rep_strtok_r(char *s, const char *delim, char **save_ptr); #endif #ifndef HAVE_STRTOLL #define strtoll rep_strtoll long long int rep_strtoll(const char *str, char **endptr, int base); #else #ifdef HAVE_BSD_STRTOLL #define strtoll rep_strtoll long long int rep_strtoll(const char *str, char **endptr, int base); #endif #endif #ifndef HAVE_STRTOULL #define strtoull rep_strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base); #else #ifdef HAVE_BSD_STRTOLL /* yes, it's not HAVE_BSD_STRTOULL */ #define strtoull rep_strtoull unsigned long long int 
rep_strtoull(const char *str, char **endptr, int base); #endif #endif #ifndef HAVE_FTRUNCATE #define ftruncate rep_ftruncate int rep_ftruncate(int,off_t); #endif #ifndef HAVE_INITGROUPS #define initgroups rep_initgroups int rep_initgroups(char *name, gid_t id); #endif #if !defined(HAVE_BZERO) && defined(HAVE_MEMSET) #define bzero(a,b) memset((a),'\0',(b)) #endif #ifndef HAVE_DLERROR #define dlerror rep_dlerror char *rep_dlerror(void); #endif #ifndef HAVE_DLOPEN #define dlopen rep_dlopen #ifdef DLOPEN_TAKES_UNSIGNED_FLAGS void *rep_dlopen(const char *name, unsigned int flags); #else void *rep_dlopen(const char *name, int flags); #endif #endif #ifndef HAVE_DLSYM #define dlsym rep_dlsym void *rep_dlsym(void *handle, const char *symbol); #endif #ifndef HAVE_DLCLOSE #define dlclose rep_dlclose int rep_dlclose(void *handle); #endif #ifndef HAVE_SOCKETPAIR #define socketpair rep_socketpair /* prototype is in system/network.h */ #endif /* for old gcc releases that don't have the feature test macro __has_attribute */ #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef PRINTF_ATTRIBUTE #if __has_attribute(format) || (__GNUC__ >= 3) /** Use gcc attribute to check printf fns. a1 is the 1-based index of * the parameter containing the format, and a2 the index of the first * argument. Note that some gcc 2.x versions don't handle this * properly **/ #define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2))) #else #define PRINTF_ATTRIBUTE(a1, a2) #endif #endif #ifndef _DEPRECATED_ #if __has_attribute(deprecated) || (__GNUC__ >= 3) #define _DEPRECATED_ __attribute__ ((deprecated)) #else #define _DEPRECATED_ #endif #endif #if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vdprintf rep_vdprintf int rep_vdprintf(int fd, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); #endif #if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define dprintf rep_dprintf int rep_dprintf(int fd, const char *format, ...) PRINTF_ATTRIBUTE(2,3); #endif #if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vasprintf rep_vasprintf int rep_vasprintf(char **ptr, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); #endif #if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define snprintf rep_snprintf int rep_snprintf(char *,size_t ,const char *, ...) PRINTF_ATTRIBUTE(3,4); #endif #if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vsnprintf rep_vsnprintf int rep_vsnprintf(char *,size_t ,const char *, va_list ap) PRINTF_ATTRIBUTE(3,0); #endif #if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define asprintf rep_asprintf int rep_asprintf(char **,const char *, ...) PRINTF_ATTRIBUTE(2,3); #endif #if !defined(HAVE_C99_VSNPRINTF) #ifdef REPLACE_BROKEN_PRINTF /* * We do not redefine printf by default * as it breaks the build if system headers * use __attribute__((format(printf, 3, 0))) * instead of __attribute__((format(__printf__, 3, 0))) */ #define printf rep_printf #endif int rep_printf(const char *, ...) PRINTF_ATTRIBUTE(1,2); #endif #if !defined(HAVE_C99_VSNPRINTF) #define fprintf rep_fprintf int rep_fprintf(FILE *stream, const char *, ...) 
PRINTF_ATTRIBUTE(2,3); #endif #ifndef HAVE_VSYSLOG #ifdef HAVE_SYSLOG #define vsyslog rep_vsyslog void rep_vsyslog (int facility_priority, const char *format, va_list arglist) PRINTF_ATTRIBUTE(2,0); #endif #endif /* we used to use these fns, but now we have good replacements for snprintf and vsnprintf */ #define slprintf snprintf #ifndef HAVE_VA_COPY #undef va_copy #ifdef HAVE___VA_COPY #define va_copy(dest, src) __va_copy(dest, src) #else #define va_copy(dest, src) (dest) = (src) #endif #endif #ifndef HAVE_VOLATILE #define volatile #endif #ifndef HAVE_COMPARISON_FN_T typedef int (*comparison_fn_t)(const void *, const void *); #endif #ifndef HAVE_WORKING_STRPTIME #define strptime rep_strptime struct tm; char *rep_strptime(const char *buf, const char *format, struct tm *tm); #endif #ifndef HAVE_DUP2 #define dup2 rep_dup2 int rep_dup2(int oldfd, int newfd); #endif /* Load header file for dynamic linking stuff */ #ifdef HAVE_DLFCN_H #include #endif #ifndef RTLD_LAZY #define RTLD_LAZY 0 #endif #ifndef RTLD_NOW #define RTLD_NOW 0 #endif #ifndef RTLD_GLOBAL #define RTLD_GLOBAL 0 #endif #ifndef HAVE_SECURE_MKSTEMP #define mkstemp(path) rep_mkstemp(path) int rep_mkstemp(char *temp); #endif #ifndef HAVE_MKDTEMP #define mkdtemp rep_mkdtemp char *rep_mkdtemp(char *template); #endif #ifndef HAVE_PREAD #define pread rep_pread ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset); #define LIBREPLACE_PREAD_REPLACED 1 #else #define LIBREPLACE_PREAD_NOT_REPLACED 1 #endif #ifndef HAVE_PWRITE #define pwrite rep_pwrite ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset); #define LIBREPLACE_PWRITE_REPLACED 1 #else #define LIBREPLACE_PWRITE_NOT_REPLACED 1 #endif #if !defined(HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) #define inet_ntoa rep_inet_ntoa /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_PTON #define inet_pton rep_inet_pton /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_NTOP #define inet_ntop rep_inet_ntop /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_ATON #define inet_aton rep_inet_aton /* prototype is in "system/network.h" */ #endif #ifndef HAVE_CONNECT #define connect rep_connect /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETHOSTBYNAME #define gethostbyname rep_gethostbyname /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETIFADDRS #define getifaddrs rep_getifaddrs /* prototype is in "system/network.h" */ #endif #ifndef HAVE_FREEIFADDRS #define freeifaddrs rep_freeifaddrs /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GET_CURRENT_DIR_NAME #define get_current_dir_name rep_get_current_dir_name char *rep_get_current_dir_name(void); #endif #if (!defined(HAVE_STRERROR_R) || !defined(STRERROR_R_XSI_NOT_GNU)) #define strerror_r rep_strerror_r int rep_strerror_r(int errnum, char *buf, size_t buflen); #endif #if !defined(HAVE_CLOCK_GETTIME) #define clock_gettime rep_clock_gettime #endif #ifdef HAVE_LIMITS_H #include #endif #ifdef HAVE_SYS_PARAM_H #include #endif /* The extra casts work around common compiler bugs. */ #define _TYPE_SIGNED(t) (! ((t) 0 < (t) -1)) /* The outer cast is needed to work around a bug in Cray C 5.0.3.0. It is necessary at least when t == time_t. */ #define _TYPE_MINIMUM(t) ((t) (_TYPE_SIGNED (t) \ ? 
~ (t) 0 << (sizeof (t) * CHAR_BIT - 1) : (t) 0)) #define _TYPE_MAXIMUM(t) ((t) (~ (t) 0 - _TYPE_MINIMUM (t))) #ifndef UINT16_MAX #define UINT16_MAX 65535 #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #ifndef UINT64_MAX #define UINT64_MAX ((uint64_t)-1) #endif #ifndef INT64_MAX #define INT64_MAX 9223372036854775807LL #endif #ifndef CHAR_BIT #define CHAR_BIT 8 #endif #ifndef INT32_MAX #define INT32_MAX _TYPE_MAXIMUM(int32_t) #endif #ifdef HAVE_STDBOOL_H #include #endif #if !defined(HAVE_BOOL) #ifdef HAVE__Bool #define bool _Bool #else typedef int bool; #endif #endif #if !defined(HAVE_INTPTR_T) typedef long long intptr_t ; #define __intptr_t_defined #endif #if !defined(HAVE_UINTPTR_T) typedef unsigned long long uintptr_t ; #define __uintptr_t_defined #endif #if !defined(HAVE_PTRDIFF_T) typedef unsigned long long ptrdiff_t ; #endif /* * to prevent from doing a redefine of 'bool' * * IRIX, HPUX, MacOS 10 and Solaris need BOOL_DEFINED * Tru64 needs _BOOL_EXISTS * AIX needs _BOOL,_TRUE,_FALSE */ #ifndef BOOL_DEFINED #define BOOL_DEFINED #endif #ifndef _BOOL_EXISTS #define _BOOL_EXISTS #endif #ifndef _BOOL #define _BOOL #endif #ifndef __bool_true_false_are_defined #define __bool_true_false_are_defined #endif #ifndef true #define true (1) #endif #ifndef false #define false (0) #endif #ifndef _TRUE #define _TRUE true #endif #ifndef _FALSE #define _FALSE false #endif #ifndef HAVE_FUNCTION_MACRO #ifdef HAVE_func_MACRO #define __FUNCTION__ __func__ #else #define __FUNCTION__ ("") #endif #endif #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif #if !defined(HAVE_VOLATILE) #define volatile #endif /** this is a warning hack. The idea is to use this everywhere that we get the "discarding const" warning from gcc. That doesn't actually fix the problem of course, but it means that when we do get to cleaning them up we can do it by searching the code for discard_const. It also means that other error types aren't as swamped by the noise of hundreds of const warnings, so we are more likely to notice when we get new errors. Please only add more uses of this macro when you find it _really_ hard to fix const warnings. Our aim is to eventually use this function in only a very few places. Also, please call this via the discard_const_p() macro interface, as that makes the return type safe. */ #define discard_const(ptr) ((void *)((uintptr_t)(ptr))) /** Type-safe version of discard_const */ #define discard_const_p(type, ptr) ((type *)discard_const(ptr)) #ifndef __STRING #define __STRING(x) #x #endif #ifndef __STRINGSTRING #define __STRINGSTRING(x) __STRING(x) #endif #ifndef __LINESTR__ #define __LINESTR__ __STRINGSTRING(__LINE__) #endif #ifndef __location__ #define __location__ __FILE__ ":" __LINESTR__ #endif /** * Zero a structure. */ #define ZERO_STRUCT(x) memset_s((char *)&(x), sizeof(x), 0, sizeof(x)) /** * Zero a structure given a pointer to the structure. */ #define ZERO_STRUCTP(x) do { \ if ((x) != NULL) { \ memset_s((char *)(x), sizeof(*(x)), 0, sizeof(*(x))); \ } \ } while(0) /** * Zero a structure given a pointer to the structure - no zero check */ #define ZERO_STRUCTPN(x) memset_s((char *)(x), sizeof(*(x)), 0, sizeof(*(x))) /** * Zero an array - note that sizeof(array) must work - ie. 
it must not be a * pointer */ #define ZERO_ARRAY(x) memset_s((char *)(x), sizeof(x), 0, sizeof(x)) /** * Zero a given len of an array */ #define ZERO_ARRAY_LEN(x, l) memset_s((char *)(x), (l), 0, (l)) /** * Work out how many elements there are in a static array. */ #ifdef ARRAY_SIZE #undef ARRAY_SIZE #endif #define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) /** * Remove an array element by moving the rest one down */ #define ARRAY_DEL_ELEMENT(a,i,n) \ if((i)<((n)-1)){memmove(&((a)[(i)]),&((a)[(i)+1]),(sizeof(*(a))*((n)-(i)-1)));} /** * Pointer difference macro */ #define PTR_DIFF(p1,p2) ((ptrdiff_t)(((const char *)(p1)) - (const char *)(p2))) #ifdef __COMPAR_FN_T #define QSORT_CAST (__compar_fn_t) #endif #ifndef QSORT_CAST #define QSORT_CAST (int (*)(const void *, const void *)) #endif #ifndef PATH_MAX #define PATH_MAX 1024 #endif #ifndef MAX_DNS_NAME_LENGTH #define MAX_DNS_NAME_LENGTH 256 /* Actually 255 but +1 for terminating null. */ #endif #ifdef HAVE_CRYPT_H #include #endif /* these macros gain us a few percent of speed on gcc */ #if (__GNUC__ >= 3) /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1 as its first argument */ #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #else #ifndef likely #define likely(x) (x) #endif #ifndef unlikely #define unlikely(x) (x) #endif #endif #ifndef HAVE_FDATASYNC #define fdatasync(fd) fsync(fd) #elif !defined(HAVE_DECL_FDATASYNC) int fdatasync(int ); #endif /* these are used to mark symbols as local to a shared lib, or * publicly available via the shared lib API */ #ifndef _PUBLIC_ #ifdef HAVE_VISIBILITY_ATTR #define _PUBLIC_ __attribute__((visibility("default"))) #else #define _PUBLIC_ #endif #endif #ifndef _PRIVATE_ #ifdef HAVE_VISIBILITY_ATTR # define _PRIVATE_ __attribute__((visibility("hidden"))) #else # define _PRIVATE_ #endif #endif #ifndef HAVE_POLL #define poll rep_poll /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETPEEREID #define getpeereid rep_getpeereid int rep_getpeereid(int s, uid_t *uid, gid_t *gid); #endif #ifndef HAVE_USLEEP #define usleep rep_usleep typedef long useconds_t; int usleep(useconds_t); #endif #ifndef HAVE_SETPROCTITLE #define setproctitle rep_setproctitle void rep_setproctitle(const char *fmt, ...) 
PRINTF_ATTRIBUTE(1, 2); #endif #ifndef HAVE_SETPROCTITLE_INIT #define setproctitle_init rep_setproctitle_init void rep_setproctitle_init(int argc, char *argv[], char *envp[]); #endif #ifndef HAVE_MEMSET_S #define memset_s rep_memset_s int rep_memset_s(void *dest, size_t destsz, int ch, size_t count); #endif #ifndef HAVE_GETPROGNAME #define getprogname rep_getprogname const char *rep_getprogname(void); #endif #ifndef HAVE_COPY_FILE_RANGE #define copy_file_range rep_copy_file_range ssize_t rep_copy_file_range(int fd_in, loff_t *off_in, int fd_out, loff_t *off_out, size_t len, unsigned int flags); #endif /* HAVE_COPY_FILE_RANGE */ #ifndef FALL_THROUGH # ifdef HAVE_FALLTHROUGH_ATTRIBUTE # define FALL_THROUGH __attribute__ ((fallthrough)) # else /* HAVE_FALLTHROUGH_ATTRIBUTE */ # define FALL_THROUGH ((void)0) # endif /* HAVE_FALLTHROUGH_ATTRIBUTE */ #endif /* FALL_THROUGH */ bool nss_wrapper_enabled(void); bool nss_wrapper_hosts_enabled(void); bool socket_wrapper_enabled(void); bool uid_wrapper_enabled(void); static inline bool _hexcharval(char c, uint8_t *val) { if ((c >= '0') && (c <= '9')) { *val = c - '0'; return true; } if ((c >= 'a') && (c <= 'f')) { *val = c - 'a' + 10; return true; } if ((c >= 'A') && (c <= 'F')) { *val = c - 'A' + 10; return true; } return false; } static inline bool hex_byte(const char *in, uint8_t *out) { uint8_t hi=0, lo=0; bool ok = _hexcharval(in[0], &hi) && _hexcharval(in[1], &lo); *out = (hi<<4)+lo; return ok; } /* Needed for Solaris atomic_add_XX functions. */ #if defined(HAVE_SYS_ATOMIC_H) #include #endif #endif /* _LIBREPLACE_REPLACE_H */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/snprintf.c0000660000000000000000000010674600000000000017241 0ustar00rootroot00000000000000/* * NOTE: If you change this file, please merge it into rsync, samba, etc. */ /* * Copyright Patrick Powell 1995 * This code is based on code written by Patrick Powell (papowell@astart.com) * It may be used for any purpose as long as this notice remains intact * on all source code distributions */ /************************************************************** * Original: * Patrick Powell Tue Apr 11 09:48:21 PDT 1995 * A bombproof version of doprnt (dopr) included. * Sigh. This sort of thing is always nasty do deal with. Note that * the version here does not include floating point... * * snprintf() is used instead of sprintf() as it does limit checks * for string length. This covers a nasty loophole. * * The other functions are there to prevent NULL pointers from * causing nast effects. * * More Recently: * Brandon Long 9/15/96 for mutt 0.43 * This was ugly. It is still ugly. I opted out of floating point * numbers, but the formatter understands just about everything * from the normal C string format, at least as far as I can tell from * the Solaris 2.5 printf(3S) man page. * * Brandon Long 10/22/97 for mutt 0.87.1 * Ok, added some minimal floating point support, which means this * probably requires libm on most operating systems. Don't yet * support the exponent (e,E) and sigfig (g,G). Also, fmtint() * was pretty badly broken, it just wasn't being exercised in ways * which showed it, so that's been fixed. Also, formatted the code * to mutt conventions, and removed dead code left over from the * original. Also, there is now a builtin-test, just compile with: * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm * and run snprintf for results. 
* * Thomas Roessler 01/27/98 for mutt 0.89i * The PGP code was using unsigned hexadecimal formats. * Unfortunately, unsigned formats simply didn't work. * * Michael Elkins 03/05/98 for mutt 0.90.8 * The original code assumed that both snprintf() and vsnprintf() were * missing. Some systems only have snprintf() but not vsnprintf(), so * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF. * * Andrew Tridgell (tridge@samba.org) Oct 1998 * fixed handling of %.0f * added test for HAVE_LONG_DOUBLE * * tridge@samba.org, idra@samba.org, April 2001 * got rid of fcvt code (twas buggy and made testing harder) * added C99 semantics * * date: 2002/12/19 19:56:31; author: herb; state: Exp; lines: +2 -0 * actually print args for %g and %e * * date: 2002/06/03 13:37:52; author: jmcd; state: Exp; lines: +8 -0 * Since includes.h isn't included here, VA_COPY has to be defined here. I don't * see any include file that is guaranteed to be here, so I'm defining it * locally. Fixes AIX and Solaris builds. * * date: 2002/06/03 03:07:24; author: tridge; state: Exp; lines: +5 -13 * put the ifdef for HAVE_VA_COPY in one place rather than in lots of * functions * * date: 2002/05/17 14:51:22; author: jmcd; state: Exp; lines: +21 -4 * Fix usage of va_list passed as an arg. Use __va_copy before using it * when it exists. * * date: 2002/04/16 22:38:04; author: idra; state: Exp; lines: +20 -14 * Fix incorrect zpadlen handling in fmtfp. * Thanks to Ollie Oldham for spotting it. * few mods to make it easier to compile the tests. * addedd the "Ollie" test to the floating point ones. * * Martin Pool (mbp@samba.org) April 2003 * Remove NO_CONFIG_H so that the test case can be built within a source * tree with less trouble. * Remove unnecessary SAFE_FREE() definition. * * Martin Pool (mbp@samba.org) May 2003 * Put in a prototype for dummy_snprintf() to quiet compiler warnings. * * Move #endif to make sure VA_COPY, LDOUBLE, etc are defined even * if the C library has some snprintf functions already. * * Darren Tucker (dtucker@zip.com.au) 2005 * Fix bug allowing read overruns of the source string with "%.*s" * Usually harmless unless the read runs outside the process' allocation * (eg if your malloc does guard pages) in which case it will segfault. * From OpenSSH. Also added test for same. * * Simo Sorce (idra@samba.org) Jan 2006 * * Add support for position independent parameters * fix fmtstr now it conforms to sprintf wrt min.max * **************************************************************/ #include "replace.h" #include "system/locale.h" #ifdef TEST_SNPRINTF /* need math library headers for testing */ /* In test mode, we pretend that this system doesn't have any snprintf * functions, regardless of what config.h says. */ # undef HAVE_SNPRINTF # undef HAVE_VSNPRINTF # undef HAVE_C99_VSNPRINTF # undef HAVE_ASPRINTF # undef HAVE_VASPRINTF # include #endif /* TEST_SNPRINTF */ #if defined(HAVE_SNPRINTF) && defined(HAVE_VSNPRINTF) && defined(HAVE_C99_VSNPRINTF) /* only include stdio.h if we are not re-defining snprintf or vsnprintf */ #include /* make the compiler happy with an empty file */ void dummy_snprintf(void); void dummy_snprintf(void) {} #endif /* HAVE_SNPRINTF, etc */ /* yes this really must be a ||. 
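The replacement code below has to be built both when vsnprintf() is missing entirely and when the system vsnprintf() exists but is not C99-conformant, so an && would wrongly skip it on systems with a broken-but-present vsnprintf().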
Don't muck with this (tridge) */ #if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #ifdef HAVE_LONG_DOUBLE #define LDOUBLE long double #else #define LDOUBLE double #endif #ifdef HAVE_LONG_LONG #define LLONG long long #else #define LLONG long #endif #ifndef VA_COPY #ifdef HAVE_VA_COPY #define VA_COPY(dest, src) va_copy(dest, src) #else #ifdef HAVE___VA_COPY #define VA_COPY(dest, src) __va_copy(dest, src) #else #define VA_COPY(dest, src) (dest) = (src) #endif #endif /* * dopr(): poor man's version of doprintf */ /* format read states */ #define DP_S_DEFAULT 0 #define DP_S_FLAGS 1 #define DP_S_MIN 2 #define DP_S_DOT 3 #define DP_S_MAX 4 #define DP_S_MOD 5 #define DP_S_CONV 6 #define DP_S_DONE 7 /* format flags - Bits */ #define DP_F_MINUS (1 << 0) #define DP_F_PLUS (1 << 1) #define DP_F_SPACE (1 << 2) #define DP_F_NUM (1 << 3) #define DP_F_ZERO (1 << 4) #define DP_F_UP (1 << 5) #define DP_F_UNSIGNED (1 << 6) /* Conversion Flags */ #define DP_C_CHAR 1 #define DP_C_SHORT 2 #define DP_C_LONG 3 #define DP_C_LDOUBLE 4 #define DP_C_LLONG 5 #define DP_C_SIZET 6 /* Chunk types */ #define CNK_FMT_STR 0 #define CNK_INT 1 #define CNK_OCTAL 2 #define CNK_UINT 3 #define CNK_HEX 4 #define CNK_FLOAT 5 #define CNK_CHAR 6 #define CNK_STRING 7 #define CNK_PTR 8 #define CNK_NUM 9 #define CNK_PRCNT 10 #define char_to_int(p) ((p)- '0') #ifndef MAX #define MAX(p,q) (((p) >= (q)) ? (p) : (q)) #endif struct pr_chunk { int type; /* chunk type */ int num; /* parameter number */ int min; int max; int flags; int cflags; int start; int len; LLONG value; LDOUBLE fvalue; char *strvalue; void *pnum; struct pr_chunk *min_star; struct pr_chunk *max_star; struct pr_chunk *next; }; struct pr_chunk_x { struct pr_chunk **chunks; int num; }; static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in); static void fmtstr(char *buffer, size_t *currlen, size_t maxlen, char *value, int flags, int min, int max); static void fmtint(char *buffer, size_t *currlen, size_t maxlen, LLONG value, int base, int min, int max, int flags); static void fmtfp(char *buffer, size_t *currlen, size_t maxlen, LDOUBLE fvalue, int min, int max, int flags); static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c); static struct pr_chunk *new_chunk(void); static int add_cnk_list_entry(struct pr_chunk_x **list, int max_num, struct pr_chunk *chunk); static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in) { char ch; int state; int pflag; int pnum; int pfirst; size_t currlen; va_list args; const char *base; struct pr_chunk *chunks = NULL; struct pr_chunk *cnk = NULL; struct pr_chunk_x *clist = NULL; int max_pos; int ret = -1; VA_COPY(args, args_in); state = DP_S_DEFAULT; pfirst = 1; pflag = 0; pnum = 0; max_pos = 0; base = format; ch = *format++; /* retrieve the string structure as chunks */ while (state != DP_S_DONE) { if (ch == '\0') state = DP_S_DONE; switch(state) { case DP_S_DEFAULT: if (cnk) { cnk->next = new_chunk(); cnk = cnk->next; } else { cnk = new_chunk(); } if (!cnk) goto done; if (!chunks) chunks = cnk; if (ch == '%') { state = DP_S_FLAGS; ch = *format++; } else { cnk->type = CNK_FMT_STR; cnk->start = format - base -1; while ((ch != '\0') && (ch != '%')) ch = *format++; cnk->len = format - base - cnk->start -1; } break; case DP_S_FLAGS: switch (ch) { case '-': cnk->flags |= DP_F_MINUS; ch = *format++; break; case '+': cnk->flags |= DP_F_PLUS; ch = *format++; break; case ' ': cnk->flags |= DP_F_SPACE; ch = *format++; break; case '#': cnk->flags |= DP_F_NUM; ch = 
*format++; break; case '0': cnk->flags |= DP_F_ZERO; ch = *format++; break; case 'I': /* internationalization not supported yet */ ch = *format++; break; default: state = DP_S_MIN; break; } break; case DP_S_MIN: if (isdigit((unsigned char)ch)) { cnk->min = 10 * cnk->min + char_to_int (ch); ch = *format++; } else if (ch == '$') { if (!pfirst && !pflag) { /* parameters must be all positioned or none */ goto done; } if (pfirst) { pfirst = 0; pflag = 1; } if (cnk->min == 0) /* what ?? */ goto done; cnk->num = cnk->min; cnk->min = 0; ch = *format++; } else if (ch == '*') { if (pfirst) pfirst = 0; cnk->min_star = new_chunk(); if (!cnk->min_star) /* out of memory :-( */ goto done; cnk->min_star->type = CNK_INT; if (pflag) { int num; ch = *format++; if (!isdigit((unsigned char)ch)) { /* parameters must be all positioned or none */ goto done; } for (num = 0; isdigit((unsigned char)ch); ch = *format++) { num = 10 * num + char_to_int(ch); } cnk->min_star->num = num; if (ch != '$') /* what ?? */ goto done; } else { cnk->min_star->num = ++pnum; } max_pos = add_cnk_list_entry(&clist, max_pos, cnk->min_star); if (max_pos == 0) /* out of memory :-( */ goto done; ch = *format++; state = DP_S_DOT; } else { if (pfirst) pfirst = 0; state = DP_S_DOT; } break; case DP_S_DOT: if (ch == '.') { state = DP_S_MAX; ch = *format++; } else { state = DP_S_MOD; } break; case DP_S_MAX: if (isdigit((unsigned char)ch)) { if (cnk->max < 0) cnk->max = 0; cnk->max = 10 * cnk->max + char_to_int (ch); ch = *format++; } else if (ch == '$') { if (!pfirst && !pflag) { /* parameters must be all positioned or none */ goto done; } if (cnk->max <= 0) /* what ?? */ goto done; cnk->num = cnk->max; cnk->max = -1; ch = *format++; } else if (ch == '*') { cnk->max_star = new_chunk(); if (!cnk->max_star) /* out of memory :-( */ goto done; cnk->max_star->type = CNK_INT; if (pflag) { int num; ch = *format++; if (!isdigit((unsigned char)ch)) { /* parameters must be all positioned or none */ goto done; } for (num = 0; isdigit((unsigned char)ch); ch = *format++) { num = 10 * num + char_to_int(ch); } cnk->max_star->num = num; if (ch != '$') /* what ?? 
*/ goto done; } else { cnk->max_star->num = ++pnum; } max_pos = add_cnk_list_entry(&clist, max_pos, cnk->max_star); if (max_pos == 0) /* out of memory :-( */ goto done; ch = *format++; state = DP_S_MOD; } else { state = DP_S_MOD; } break; case DP_S_MOD: switch (ch) { case 'h': cnk->cflags = DP_C_SHORT; ch = *format++; if (ch == 'h') { cnk->cflags = DP_C_CHAR; ch = *format++; } break; case 'l': cnk->cflags = DP_C_LONG; ch = *format++; if (ch == 'l') { /* It's a long long */ cnk->cflags = DP_C_LLONG; ch = *format++; } break; case 'j': cnk->cflags = DP_C_LLONG; ch = *format++; break; case 'L': cnk->cflags = DP_C_LDOUBLE; ch = *format++; break; case 'z': cnk->cflags = DP_C_SIZET; ch = *format++; break; default: break; } state = DP_S_CONV; break; case DP_S_CONV: if (cnk->num == 0) cnk->num = ++pnum; max_pos = add_cnk_list_entry(&clist, max_pos, cnk); if (max_pos == 0) /* out of memory :-( */ goto done; switch (ch) { case 'd': case 'i': cnk->type = CNK_INT; break; case 'o': cnk->type = CNK_OCTAL; cnk->flags |= DP_F_UNSIGNED; break; case 'u': cnk->type = CNK_UINT; cnk->flags |= DP_F_UNSIGNED; break; case 'X': cnk->flags |= DP_F_UP; case 'x': cnk->type = CNK_HEX; cnk->flags |= DP_F_UNSIGNED; break; case 'A': /* hex float not supported yet */ case 'E': case 'G': case 'F': cnk->flags |= DP_F_UP; case 'a': /* hex float not supported yet */ case 'e': case 'f': case 'g': cnk->type = CNK_FLOAT; break; case 'c': cnk->type = CNK_CHAR; break; case 's': cnk->type = CNK_STRING; break; case 'p': cnk->type = CNK_PTR; cnk->flags |= DP_F_UNSIGNED; break; case 'n': cnk->type = CNK_NUM; break; case '%': cnk->type = CNK_PRCNT; break; default: /* Unknown, bail out*/ goto done; } ch = *format++; state = DP_S_DEFAULT; break; case DP_S_DONE: break; default: /* hmm? */ break; /* some picky compilers need this */ } } /* retrieve the format arguments */ for (pnum = 0; pnum < max_pos; pnum++) { int i; if (clist[pnum].num == 0) { /* ignoring a parameter should not be permitted * all parameters must be matched at least once * BUT seem some system ignore this rule ... * at least my glibc based system does --SSS */ #ifdef DEBUG_SNPRINTF printf("parameter at position %d not used\n", pnum+1); #endif /* eat the parameter */ va_arg (args, int); continue; } for (i = 1; i < clist[pnum].num; i++) { if (clist[pnum].chunks[0]->type != clist[pnum].chunks[i]->type) { /* nooo noo no! 
* all the references to a parameter * must be of the same type */ goto done; } } cnk = clist[pnum].chunks[0]; switch (cnk->type) { case CNK_INT: if (cnk->cflags == DP_C_SHORT) cnk->value = va_arg (args, int); else if (cnk->cflags == DP_C_LONG) cnk->value = va_arg (args, long int); else if (cnk->cflags == DP_C_LLONG) cnk->value = va_arg (args, LLONG); else if (cnk->cflags == DP_C_SIZET) cnk->value = va_arg (args, ssize_t); else cnk->value = va_arg (args, int); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->value = cnk->value; } break; case CNK_OCTAL: case CNK_UINT: case CNK_HEX: if (cnk->cflags == DP_C_SHORT) cnk->value = va_arg (args, unsigned int); else if (cnk->cflags == DP_C_LONG) cnk->value = (unsigned long int)va_arg (args, unsigned long int); else if (cnk->cflags == DP_C_LLONG) cnk->value = (LLONG)va_arg (args, unsigned LLONG); else if (cnk->cflags == DP_C_SIZET) cnk->value = (size_t)va_arg (args, size_t); else cnk->value = (unsigned int)va_arg (args, unsigned int); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->value = cnk->value; } break; case CNK_FLOAT: if (cnk->cflags == DP_C_LDOUBLE) cnk->fvalue = va_arg (args, LDOUBLE); else cnk->fvalue = va_arg (args, double); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->fvalue = cnk->fvalue; } break; case CNK_CHAR: cnk->value = va_arg (args, int); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->value = cnk->value; } break; case CNK_STRING: cnk->strvalue = va_arg (args, char *); if (!cnk->strvalue) cnk->strvalue = "(NULL)"; for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->strvalue = cnk->strvalue; } break; case CNK_PTR: cnk->strvalue = va_arg (args, void *); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->strvalue = cnk->strvalue; } break; case CNK_NUM: if (cnk->cflags == DP_C_CHAR) cnk->pnum = va_arg (args, char *); else if (cnk->cflags == DP_C_SHORT) cnk->pnum = va_arg (args, short int *); else if (cnk->cflags == DP_C_LONG) cnk->pnum = va_arg (args, long int *); else if (cnk->cflags == DP_C_LLONG) cnk->pnum = va_arg (args, LLONG *); else if (cnk->cflags == DP_C_SIZET) cnk->pnum = va_arg (args, ssize_t *); else cnk->pnum = va_arg (args, int *); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->pnum = cnk->pnum; } break; case CNK_PRCNT: break; default: /* what ?? 
*/ goto done; } } /* print out the actual string from chunks */ currlen = 0; cnk = chunks; while (cnk) { int len, min, max; if (cnk->min_star) min = cnk->min_star->value; else min = cnk->min; if (cnk->max_star) max = cnk->max_star->value; else max = cnk->max; switch (cnk->type) { case CNK_FMT_STR: if (maxlen != 0 && maxlen > currlen) { if (maxlen > (currlen + cnk->len)) len = cnk->len; else len = maxlen - currlen; memcpy(&(buffer[currlen]), &(base[cnk->start]), len); } currlen += cnk->len; break; case CNK_INT: case CNK_UINT: fmtint (buffer, &currlen, maxlen, cnk->value, 10, min, max, cnk->flags); break; case CNK_OCTAL: fmtint (buffer, &currlen, maxlen, cnk->value, 8, min, max, cnk->flags); break; case CNK_HEX: fmtint (buffer, &currlen, maxlen, cnk->value, 16, min, max, cnk->flags); break; case CNK_FLOAT: fmtfp (buffer, &currlen, maxlen, cnk->fvalue, min, max, cnk->flags); break; case CNK_CHAR: dopr_outch (buffer, &currlen, maxlen, cnk->value); break; case CNK_STRING: if (max == -1) { max = strlen(cnk->strvalue); } fmtstr (buffer, &currlen, maxlen, cnk->strvalue, cnk->flags, min, max); break; case CNK_PTR: fmtint (buffer, &currlen, maxlen, (long)(cnk->strvalue), 16, min, max, cnk->flags); break; case CNK_NUM: if (cnk->cflags == DP_C_CHAR) *((char *)(cnk->pnum)) = (char)currlen; else if (cnk->cflags == DP_C_SHORT) *((short int *)(cnk->pnum)) = (short int)currlen; else if (cnk->cflags == DP_C_LONG) *((long int *)(cnk->pnum)) = (long int)currlen; else if (cnk->cflags == DP_C_LLONG) *((LLONG *)(cnk->pnum)) = (LLONG)currlen; else if (cnk->cflags == DP_C_SIZET) *((ssize_t *)(cnk->pnum)) = (ssize_t)currlen; else *((int *)(cnk->pnum)) = (int)currlen; break; case CNK_PRCNT: dopr_outch (buffer, &currlen, maxlen, '%'); break; default: /* what ?? */ goto done; } cnk = cnk->next; } if (maxlen != 0) { if (currlen < maxlen - 1) buffer[currlen] = '\0'; else if (maxlen > 0) buffer[maxlen - 1] = '\0'; } ret = currlen; done: va_end(args); while (chunks) { cnk = chunks->next; free(chunks); chunks = cnk; } if (clist) { for (pnum = 0; pnum < max_pos; pnum++) { if (clist[pnum].chunks) free(clist[pnum].chunks); } free(clist); } return ret; } static void fmtstr(char *buffer, size_t *currlen, size_t maxlen, char *value, int flags, int min, int max) { int padlen, strln; /* amount to pad */ int cnt = 0; #ifdef DEBUG_SNPRINTF printf("fmtstr min=%d max=%d s=[%s]\n", min, max, value); #endif if (value == 0) { value = ""; } for (strln = 0; strln < max && value[strln]; ++strln); /* strlen */ padlen = min - strln; if (padlen < 0) padlen = 0; if (flags & DP_F_MINUS) padlen = -padlen; /* Left Justify */ while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --padlen; } while (*value && (cnt < max)) { dopr_outch (buffer, currlen, maxlen, *value++); ++cnt; } while (padlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++padlen; } } /* Have to handle DP_F_NUM (ie 0x and 0 alternates) */ static void fmtint(char *buffer, size_t *currlen, size_t maxlen, LLONG value, int base, int min, int max, int flags) { int signvalue = 0; unsigned LLONG uvalue; char convert[22+1]; /* 64-bit value in octal: 22 digits + \0 */ int place = 0; int spadlen = 0; /* amount to space pad */ int zpadlen = 0; /* amount to zero pad */ int caps = 0; if (max < 0) max = 0; uvalue = value; if(!(flags & DP_F_UNSIGNED)) { if( value < 0 ) { signvalue = '-'; uvalue = -value; } else { if (flags & DP_F_PLUS) /* Do a sign (+/i) */ signvalue = '+'; else if (flags & DP_F_SPACE) signvalue = ' '; } } if (flags & DP_F_UP) caps = 1; /* Should characters be upper 
case? */ do { convert[place++] = (caps? "0123456789ABCDEF":"0123456789abcdef") [uvalue % (unsigned)base ]; uvalue = (uvalue / (unsigned)base ); } while(uvalue && (place < sizeof(convert))); if (place == sizeof(convert)) place--; convert[place] = 0; zpadlen = max - place; spadlen = min - MAX (max, place) - (signvalue ? 1 : 0); if (zpadlen < 0) zpadlen = 0; if (spadlen < 0) spadlen = 0; if (flags & DP_F_ZERO) { zpadlen = MAX(zpadlen, spadlen); spadlen = 0; } if (flags & DP_F_MINUS) spadlen = -spadlen; /* Left Justifty */ #ifdef DEBUG_SNPRINTF printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n", zpadlen, spadlen, min, max, place); #endif /* Spaces */ while (spadlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --spadlen; } /* Sign */ if (signvalue) dopr_outch (buffer, currlen, maxlen, signvalue); /* Zeros */ if (zpadlen > 0) { while (zpadlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --zpadlen; } } /* Digits */ while (place > 0) dopr_outch (buffer, currlen, maxlen, convert[--place]); /* Left Justified spaces */ while (spadlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++spadlen; } } static LDOUBLE abs_val(LDOUBLE value) { LDOUBLE result = value; if (value < 0) result = -value; return result; } static LDOUBLE POW10(int exp) { LDOUBLE result = 1; while (exp) { result *= 10; exp--; } return result; } static LLONG ROUND(LDOUBLE value) { LLONG intpart; intpart = (LLONG)value; value = value - intpart; if (value >= 0.5) intpart++; return intpart; } /* a replacement for modf that doesn't need the math library. Should be portable, but slow */ static double my_modf(double x0, double *iptr) { int i; LLONG l=0; double x = x0; double f = 1.0; for (i=0;i<100;i++) { l = (long)x; if (l <= (x+1) && l >= (x-1)) break; x *= 0.1; f *= 10.0; } if (i == 100) { /* yikes! the number is beyond what we can handle. What do we do? */ (*iptr) = 0; return 0; } if (i != 0) { double i2; double ret; ret = my_modf(x0-l*f, &i2); (*iptr) = l*f + i2; return ret; } (*iptr) = l; return x - (*iptr); } static void fmtfp (char *buffer, size_t *currlen, size_t maxlen, LDOUBLE fvalue, int min, int max, int flags) { int signvalue = 0; double ufvalue; char iconvert[311]; char fconvert[311]; int iplace = 0; int fplace = 0; int padlen = 0; /* amount to pad */ int zpadlen = 0; int caps = 0; int idx; double intpart; double fracpart; double temp; /* * AIX manpage says the default is 0, but Solaris says the default * is 6, and sprintf on AIX defaults to 6 */ if (max < 0) max = 6; ufvalue = abs_val (fvalue); if (fvalue < 0) { signvalue = '-'; } else { if (flags & DP_F_PLUS) { /* Do a sign (+/i) */ signvalue = '+'; } else { if (flags & DP_F_SPACE) signvalue = ' '; } } #if 0 if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */ #endif #if 0 if (max == 0) ufvalue += 0.5; /* if max = 0 we must round */ #endif /* * Sorry, we only support 9 digits past the decimal because of our * conversion method */ if (max > 9) max = 9; /* We "cheat" by converting the fractional part to integer by * multiplying by a factor of 10 */ temp = ufvalue; my_modf(temp, &intpart); fracpart = ROUND((POW10(max)) * (ufvalue - intpart)); if (fracpart >= POW10(max)) { intpart++; fracpart -= POW10(max); } /* Convert integer part */ do { temp = intpart*0.1; my_modf(temp, &intpart); idx = (int) ((temp -intpart +0.05)* 10.0); /* idx = (int) (((double)(temp*0.1) -intpart +0.05) *10.0); */ /* printf ("%llf, %f, %x\n", temp, intpart, idx); */ iconvert[iplace++] = (caps? 
"0123456789ABCDEF":"0123456789abcdef")[idx]; } while (intpart && (iplace < 311)); if (iplace == 311) iplace--; iconvert[iplace] = 0; /* Convert fractional part */ if (fracpart) { do { temp = fracpart*0.1; my_modf(temp, &fracpart); idx = (int) ((temp -fracpart +0.05)* 10.0); /* idx = (int) ((((temp/10) -fracpart) +0.05) *10); */ /* printf ("%lf, %lf, %ld\n", temp, fracpart, idx ); */ fconvert[fplace++] = (caps? "0123456789ABCDEF":"0123456789abcdef")[idx]; } while(fracpart && (fplace < 311)); if (fplace == 311) fplace--; } fconvert[fplace] = 0; /* -1 for decimal point, another -1 if we are printing a sign */ padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0); zpadlen = max - fplace; if (zpadlen < 0) zpadlen = 0; if (padlen < 0) padlen = 0; if (flags & DP_F_MINUS) padlen = -padlen; /* Left Justifty */ if ((flags & DP_F_ZERO) && (padlen > 0)) { if (signvalue) { dopr_outch (buffer, currlen, maxlen, signvalue); --padlen; signvalue = 0; } while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --padlen; } } while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --padlen; } if (signvalue) dopr_outch (buffer, currlen, maxlen, signvalue); while (iplace > 0) dopr_outch (buffer, currlen, maxlen, iconvert[--iplace]); #ifdef DEBUG_SNPRINTF printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen); #endif /* * Decimal point. This should probably use locale to find the correct * char to print out. */ if (max > 0) { dopr_outch (buffer, currlen, maxlen, '.'); while (zpadlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --zpadlen; } while (fplace > 0) dopr_outch (buffer, currlen, maxlen, fconvert[--fplace]); } while (padlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++padlen; } } static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c) { if (*currlen < maxlen) { buffer[(*currlen)] = c; } (*currlen)++; } static struct pr_chunk *new_chunk(void) { struct pr_chunk *new_c = (struct pr_chunk *)malloc(sizeof(struct pr_chunk)); if (!new_c) return NULL; new_c->type = 0; new_c->num = 0; new_c->min = 0; new_c->min_star = NULL; new_c->max = -1; new_c->max_star = NULL; new_c->flags = 0; new_c->cflags = 0; new_c->start = 0; new_c->len = 0; new_c->value = 0; new_c->fvalue = 0; new_c->strvalue = NULL; new_c->pnum = NULL; new_c->next = NULL; return new_c; } static int add_cnk_list_entry(struct pr_chunk_x **list, int max_num, struct pr_chunk *chunk) { struct pr_chunk_x *l; struct pr_chunk **c; int max; int cnum; int i, pos; if (chunk->num > max_num) { max = chunk->num; if (*list == NULL) { l = (struct pr_chunk_x *)malloc(sizeof(struct pr_chunk_x) * max); pos = 0; } else { l = (struct pr_chunk_x *)realloc(*list, sizeof(struct pr_chunk_x) * max); pos = max_num; } if (l == NULL) { for (i = 0; i < max; i++) { if ((*list)[i].chunks) free((*list)[i].chunks); } return 0; } for (i = pos; i < max; i++) { l[i].chunks = NULL; l[i].num = 0; } } else { l = *list; max = max_num; } i = chunk->num - 1; cnum = l[i].num + 1; if (l[i].chunks == NULL) { c = (struct pr_chunk **)malloc(sizeof(struct pr_chunk *) * cnum); } else { c = (struct pr_chunk **)realloc(l[i].chunks, sizeof(struct pr_chunk *) * cnum); } if (c == NULL) { for (i = 0; i < max; i++) { if (l[i].chunks) free(l[i].chunks); } return 0; } c[l[i].num] = chunk; l[i].chunks = c; l[i].num = cnum; *list = l; return max; } int rep_vsnprintf (char *str, size_t count, const char *fmt, va_list args) { return dopr(str, count, fmt, args); } #endif /* yes this really must be a ||. 
Don't muck with this (tridge) * * The logic for these two is that we need our own definition if the * OS *either* has no definition of *sprintf, or if it does have one * that doesn't work properly according to the autoconf test. */ #if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_snprintf(char *str,size_t count,const char *fmt,...) { size_t ret; va_list ap; va_start(ap, fmt); ret = vsnprintf(str, count, fmt, ap); va_end(ap); return ret; } #endif #ifndef HAVE_C99_VSNPRINTF int rep_printf(const char *fmt, ...) { va_list ap; int ret; char *s; s = NULL; va_start(ap, fmt); ret = vasprintf(&s, fmt, ap); va_end(ap); if (s) { fwrite(s, 1, strlen(s), stdout); } free(s); return ret; } #endif #ifndef HAVE_C99_VSNPRINTF int rep_fprintf(FILE *stream, const char *fmt, ...) { va_list ap; int ret; char *s; s = NULL; va_start(ap, fmt); ret = vasprintf(&s, fmt, ap); va_end(ap); if (s) { fwrite(s, 1, strlen(s), stream); } free(s); return ret; } #endif #endif #if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_vasprintf(char **ptr, const char *format, va_list ap) { int ret; va_list ap2; VA_COPY(ap2, ap); ret = vsnprintf(NULL, 0, format, ap2); va_end(ap2); if (ret < 0) return ret; (*ptr) = (char *)malloc(ret+1); if (!*ptr) return -1; VA_COPY(ap2, ap); ret = vsnprintf(*ptr, ret+1, format, ap2); va_end(ap2); return ret; } #endif #if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_asprintf(char **ptr, const char *format, ...) { va_list ap; int ret; *ptr = NULL; va_start(ap, format); ret = vasprintf(ptr, format, ap); va_end(ap); return ret; } #endif #ifdef TEST_SNPRINTF int sprintf(char *str,const char *fmt,...); int printf(const char *fmt,...); int main (void) { char buf1[1024]; char buf2[1024]; char *buf3; char *fp_fmt[] = { "%1.1f", "%-1.5f", "%1.5f", "%123.9f", "%10.5f", "% 10.5f", "%+22.9f", "%+4.9f", "%01.3f", "%4f", "%3.1f", "%3.2f", "%.0f", "%f", "%-8.8f", "%-9.9f", NULL }; double fp_nums[] = { 6442452944.1234, -1.5, 134.21, 91340.2, 341.1234, 203.9, 0.96, 0.996, 0.9996, 1.996, 4.136, 5.030201, 0.00205, /* END LIST */ 0}; char *int_fmt[] = { "%-1.5d", "%1.5d", "%123.9d", "%5.5d", "%10.5d", "% 10.5d", "%+22.33d", "%01.3d", "%4d", "%d", NULL }; long int_nums[] = { -1, 134, 91340, 341, 0203, 1234567890, 0}; char *str_fmt[] = { "%10.5s", "%-10.5s", "%5.10s", "%-5.10s", "%10.1s", "%0.10s", "%10.0s", "%1.10s", "%s", "%.1s", "%.10s", "%10s", NULL }; char *str_vals[] = {"hello", "a", "", "a longer string", NULL}; #ifdef HAVE_LONG_LONG char *ll_fmt[] = { "%llu", NULL }; LLONG ll_nums[] = { 134, 91340, 341, 0203, 1234567890, 128006186140000000LL, 0}; #endif int x, y; int fail = 0; int num = 0; int l1, l2; char *ss_fmt[] = { "%zd", "%zu", NULL }; size_t ss_nums[] = {134, 91340, 123456789, 0203, 1234567890, 0}; printf ("Testing snprintf format codes against system sprintf...\n"); for (x = 0; fp_fmt[x] ; x++) { for (y = 0; fp_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]); l2 = sprintf (buf2, fp_fmt[x], fp_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", fp_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } for (x = 0; int_fmt[x] ; x++) { for (y = 0; int_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]); l2 = sprintf (buf2, int_fmt[x], int_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { 
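/*
 * Usage sketch for the asprintf()/vasprintf() replacements above (the
 * variable names below are invented for illustration): on success the
 * result is allocated with malloc() and the caller must free() it; on
 * failure a negative value is returned and *ptr is left NULL.
 *
 *   char *msg = NULL;
 *   int n = rep_asprintf(&msg, "%s line %d", "example.c", 42);
 *   if (n >= 0) {
 *           printf("%s\n", msg);
 *           free(msg);
 *   }
 */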
printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", int_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } for (x = 0; str_fmt[x] ; x++) { for (y = 0; str_vals[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), str_fmt[x], str_vals[y]); l2 = sprintf (buf2, str_fmt[x], str_vals[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", str_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #ifdef HAVE_LONG_LONG for (x = 0; ll_fmt[x] ; x++) { for (y = 0; ll_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]); l2 = sprintf (buf2, ll_fmt[x], ll_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", ll_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #endif #define BUFSZ 2048 buf1[0] = buf2[0] = '\0'; if ((buf3 = malloc(BUFSZ)) == NULL) { fail++; } else { num++; memset(buf3, 'a', BUFSZ); snprintf(buf1, sizeof(buf1), "%.*s", 1, buf3); buf1[1023] = '\0'; if (strcmp(buf1, "a") != 0) { printf("length limit buf1 '%s' expected 'a'\n", buf1); fail++; } } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); l2 = sprintf(buf2, "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); fail++; } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); l2 = sprintf(buf2, "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); fail++; } for (x = 0; ss_fmt[x] ; x++) { for (y = 0; ss_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), ss_fmt[x], ss_nums[y]); l2 = sprintf (buf2, ss_fmt[x], ss_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", ss_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #if 0 buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%lld", (LLONG)1234567890); l2 = sprintf(buf2, "%lld", (LLONG)1234567890); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%lld", l1, buf1, l2, buf2); fail++; } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%Lf", (LDOUBLE)890.1234567890123); l2 = sprintf(buf2, "%Lf", (LDOUBLE)890.1234567890123); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%Lf", l1, buf1, l2, buf2); fail++; } #endif printf ("%d tests failed out of %d.\n", fail, num); printf("seeing how many digits we support\n"); { double v0 = 0.12345678901234567890123456789012345678901; for (x=0; x<100; x++) { double p = pow(10, x); double r = v0*p; snprintf(buf1, sizeof(buf1), "%1.1f", r); sprintf(buf2, "%1.1f", r); if (strcmp(buf1, buf2)) { printf("we seem to support 
%d digits\n", x-1); break; } } } return 0; } #endif /* TEST_SNPRINTF */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/socket.c0000660000000000000000000000230700000000000016652 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * Dummy replacements for socket functions. * * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" int rep_connect(int sockfd, const struct sockaddr *serv_addr, socklen_t addrlen) { errno = ENOSYS; return -1; } struct hostent *rep_gethostbyname(const char *name) { errno = ENOSYS; return NULL; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/socketpair.c0000660000000000000000000000250100000000000017522 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * replacement routines for broken systems * Copyright (C) Jelmer Vernooij 2006 * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" int rep_socketpair(int d, int type, int protocol, int sv[2]) { if (d != AF_UNIX) { errno = EAFNOSUPPORT; return -1; } if (protocol != 0) { errno = EPROTONOSUPPORT; return -1; } if (type != SOCK_STREAM) { errno = EOPNOTSUPP; return -1; } return pipe(sv); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/strptime.c0000660000000000000000000005674100000000000017244 0ustar00rootroot00000000000000/* Convert a string representation of time to a time value. Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper , 1996. 
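A short usage sketch for the rep_socketpair() fallback a little further
up (error handling elided; the "ping" payload and variable names are
invented for illustration). Because the fallback is built on pipe(2),
the descriptor pair is one-way: write on sv[1], read on sv[0], rather
than the full-duplex pair a real AF_UNIX socketpair() would give.

   int sv[2];
   char buf[4];

   if (rep_socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
           write(sv[1], "ping", 4);
           read(sv[0], buf, sizeof(buf));
           close(sv[0]);
           close(sv[1]);
   }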
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; see the file COPYING.LIB. If not, see . */ /* XXX This version of the implementation is not really complete. Some of the fields cannot add information alone. But if seeing some of them in the same format (such as year, week and weekday) this is enough information for determining the date. */ #include "replace.h" #include "system/locale.h" #include "system/time.h" #ifndef __P # if defined (__GNUC__) || (defined (__STDC__) && __STDC__) # define __P(args) args # else # define __P(args) () # endif /* GCC. */ #endif /* Not __P. */ #if ! HAVE_LOCALTIME_R && ! defined localtime_r # ifdef _LIBC # define localtime_r __localtime_r # else /* Approximate localtime_r as best we can in its absence. */ # define localtime_r my_localtime_r static struct tm *localtime_r __P ((const time_t *, struct tm *)); static struct tm * localtime_r (t, tp) const time_t *t; struct tm *tp; { struct tm *l = localtime (t); if (! l) return 0; *tp = *l; return tp; } # endif /* ! _LIBC */ #endif /* ! HAVE_LOCALTIME_R && ! defined (localtime_r) */ #define match_char(ch1, ch2) if (ch1 != ch2) return NULL #if defined __GNUC__ && __GNUC__ >= 2 # define match_string(cs1, s2) \ ({ size_t len = strlen (cs1); \ int result = strncasecmp ((cs1), (s2), len) == 0; \ if (result) (s2) += len; \ result; }) #else /* Oh come on. Get a reasonable compiler. */ # define match_string(cs1, s2) \ (strncasecmp ((cs1), (s2), strlen (cs1)) ? 0 : ((s2) += strlen (cs1), 1)) #endif /* We intentionally do not use isdigit() for testing because this will lead to problems with the wide character version. */ #define get_number(from, to, n) \ do { \ int __n = n; \ val = 0; \ while (*rp == ' ') \ ++rp; \ if (*rp < '0' || *rp > '9') \ return NULL; \ do { \ val *= 10; \ val += *rp++ - '0'; \ } while (--__n > 0 && val * 10 <= to && *rp >= '0' && *rp <= '9'); \ if (val < from || val > to) \ return NULL; \ } while (0) #ifdef _NL_CURRENT # define get_alt_number(from, to, n) \ ({ \ __label__ do_normal; \ if (*decided != raw) \ { \ const char *alts = _NL_CURRENT (LC_TIME, ALT_DIGITS); \ int __n = n; \ int any = 0; \ while (*rp == ' ') \ ++rp; \ val = 0; \ do { \ val *= 10; \ while (*alts != '\0') \ { \ size_t len = strlen (alts); \ if (strncasecmp (alts, rp, len) == 0) \ break; \ alts += len + 1; \ ++val; \ } \ if (*alts == '\0') \ { \ if (*decided == not && ! any) \ goto do_normal; \ /* If we haven't read anything it's an error. */ \ if (! any) \ return NULL; \ /* Correct the premature multiplication. */ \ val /= 10; \ break; \ } \ else \ *decided = loc; \ } while (--__n > 0 && val * 10 <= to); \ if (val < from || val > to) \ return NULL; \ } \ else \ { \ do_normal: \ get_number (from, to, n); \ } \ 0; \ }) #else # define get_alt_number(from, to, n) \ /* We don't have the alternate representation. 
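(For reference, the get_number() fallback used here skips leading
spaces, consumes at most n digits, and makes the whole parse fail by
returning NULL unless the value ends up in [from, to].  For example,
the %m handler below uses

   get_number (1, 12, 2);   tm->tm_mon = val - 1;

which accepts "7" or "12" but rejects "13" or "0".)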
*/ \ get_number(from, to, n) #endif #define recursive(new_fmt) \ (*(new_fmt) != '\0' \ && (rp = strptime_internal (rp, (new_fmt), tm, decided, era_cnt)) != NULL) #ifdef _LIBC /* This is defined in locale/C-time.c in the GNU libc. */ extern const struct locale_data _nl_C_LC_TIME; extern const unsigned short int __mon_yday[2][13]; # define weekday_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (DAY_1)].string) # define ab_weekday_name \ (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABDAY_1)].string) # define month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (MON_1)].string) # define ab_month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABMON_1)].string) # define HERE_D_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_T_FMT)].string) # define HERE_D_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_FMT)].string) # define HERE_AM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (AM_STR)].string) # define HERE_PM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (PM_STR)].string) # define HERE_T_FMT_AMPM \ (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT_AMPM)].string) # define HERE_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT)].string) # define strncasecmp(s1, s2, n) __strncasecmp (s1, s2, n) #else static char const weekday_name[][10] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" }; static char const ab_weekday_name[][4] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; static char const month_name[][10] = { "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" }; static char const ab_month_name[][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; # define HERE_D_T_FMT "%a %b %e %H:%M:%S %Y" # define HERE_D_FMT "%m/%d/%y" # define HERE_AM_STR "AM" # define HERE_PM_STR "PM" # define HERE_T_FMT_AMPM "%I:%M:%S %p" # define HERE_T_FMT "%H:%M:%S" static const unsigned short int __mon_yday[2][13] = { /* Normal years. */ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, /* Leap years. */ { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } }; #endif /* Status of lookup: do we use the locale data or the raw data? */ enum locale_status { not, loc, raw }; #ifndef __isleap /* Nonzero if YEAR is a leap year (every 4 years, except every 100th isn't, and every 400th is). */ # define __isleap(year) \ ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) #endif /* Compute the day of the week. */ static void day_of_the_week (struct tm *tm) { /* We know that January 1st 1970 was a Thursday (= 4). Compute the the difference between this data in the one on TM and so determine the weekday. */ int corr_year = 1900 + tm->tm_year - (tm->tm_mon < 2); int wday = (-473 + (365 * (tm->tm_year - 70)) + (corr_year / 4) - ((corr_year / 4) / 25) + ((corr_year / 4) % 25 < 0) + (((corr_year / 4) / 25) / 4) + __mon_yday[0][tm->tm_mon] + tm->tm_mday - 1); tm->tm_wday = ((wday % 7) + 7) % 7; } /* Compute the day of the year. 
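(Before that, a quick hand check of the weekday formula in
day_of_the_week() above, worked out here only for illustration: for
1970-01-01 we have tm_year = 70, tm_mon = 0, tm_mday = 1, so
corr_year = 1900 + 70 - 1 = 1969 (January counts as being before the
leap day) and

   wday = -473 + 365*0 + 492 - 19 + 0 + 4 + 0 + 0 = 4

where 492 = 1969/4, 19 = 492/25 and 4 = 19/4; 4 is Thursday, matching
the "January 1st 1970 was a Thursday" note above.)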
*/ static void day_of_the_year (struct tm *tm) { tm->tm_yday = (__mon_yday[__isleap (1900 + tm->tm_year)][tm->tm_mon] + (tm->tm_mday - 1)); } static char * #ifdef _LIBC internal_function #endif strptime_internal __P ((const char *rp, const char *fmt, struct tm *tm, enum locale_status *decided, int era_cnt)); static char * #ifdef _LIBC internal_function #endif strptime_internal (rp, fmt, tm, decided, era_cnt) const char *rp; const char *fmt; struct tm *tm; enum locale_status *decided; int era_cnt; { int cnt; size_t val; int have_I, is_pm; int century, want_century; int want_era; int have_wday, want_xday; int have_yday; int have_mon, have_mday; #ifdef _NL_CURRENT const char *rp_backup; size_t num_eras; struct era_entry *era; era = NULL; #endif have_I = is_pm = 0; century = -1; want_century = 0; want_era = 0; have_wday = want_xday = have_yday = have_mon = have_mday = 0; while (*fmt != '\0') { /* A white space in the format string matches 0 more or white space in the input string. */ if (isspace (*fmt)) { while (isspace (*rp)) ++rp; ++fmt; continue; } /* Any character but `%' must be matched by the same character in the iput string. */ if (*fmt != '%') { match_char (*fmt++, *rp++); continue; } ++fmt; #ifndef _NL_CURRENT /* We need this for handling the `E' modifier. */ start_over: #endif #ifdef _NL_CURRENT /* Make back up of current processing pointer. */ rp_backup = rp; #endif switch (*fmt++) { case '%': /* Match the `%' character itself. */ match_char ('%', *rp++); break; case 'a': case 'A': /* Match day of week. */ for (cnt = 0; cnt < 7; ++cnt) { #ifdef _NL_CURRENT if (*decided !=raw) { if (match_string (_NL_CURRENT (LC_TIME, DAY_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, DAY_1 + cnt), weekday_name[cnt])) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), ab_weekday_name[cnt])) *decided = loc; break; } } #endif if (*decided != loc && (match_string (weekday_name[cnt], rp) || match_string (ab_weekday_name[cnt], rp))) { *decided = raw; break; } } if (cnt == 7) /* Does not match a weekday name. */ return NULL; tm->tm_wday = cnt; have_wday = 1; break; case 'b': case 'B': case 'h': /* Match month name. */ for (cnt = 0; cnt < 12; ++cnt) { #ifdef _NL_CURRENT if (*decided !=raw) { if (match_string (_NL_CURRENT (LC_TIME, MON_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, MON_1 + cnt), month_name[cnt])) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), ab_month_name[cnt])) *decided = loc; break; } } #endif if (match_string (month_name[cnt], rp) || match_string (ab_month_name[cnt], rp)) { *decided = raw; break; } } if (cnt == 12) /* Does not match a month name. */ return NULL; tm->tm_mon = cnt; want_xday = 1; break; case 'c': /* Match locale's date and time format. */ #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, D_T_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, D_T_FMT), HERE_D_T_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } #endif if (!recursive (HERE_D_T_FMT)) return NULL; want_xday = 1; break; case 'C': /* Match century number. */ #ifdef _NL_CURRENT match_century: #endif get_number (0, 99, 2); century = val; want_xday = 1; break; case 'd': case 'e': /* Match day of month. 
*/ get_number (1, 31, 2); tm->tm_mday = val; have_mday = 1; want_xday = 1; break; case 'F': if (!recursive ("%Y-%m-%d")) return NULL; want_xday = 1; break; case 'x': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, D_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, D_FMT), HERE_D_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } #endif FALL_THROUGH; case 'D': /* Match standard day format. */ if (!recursive (HERE_D_FMT)) return NULL; want_xday = 1; break; case 'k': case 'H': /* Match hour in 24-hour clock. */ get_number (0, 23, 2); tm->tm_hour = val; have_I = 0; break; case 'I': /* Match hour in 12-hour clock. */ get_number (1, 12, 2); tm->tm_hour = val % 12; have_I = 1; break; case 'j': /* Match day number of year. */ get_number (1, 366, 3); tm->tm_yday = val - 1; have_yday = 1; break; case 'm': /* Match number of month. */ get_number (1, 12, 2); tm->tm_mon = val - 1; have_mon = 1; want_xday = 1; break; case 'M': /* Match minute. */ get_number (0, 59, 2); tm->tm_min = val; break; case 'n': case 't': /* Match any white space. */ while (isspace (*rp)) ++rp; break; case 'p': /* Match locale's equivalent of AM/PM. */ #ifdef _NL_CURRENT if (*decided != raw) { if (match_string (_NL_CURRENT (LC_TIME, AM_STR), rp)) { if (strcmp (_NL_CURRENT (LC_TIME, AM_STR), HERE_AM_STR)) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, PM_STR), rp)) { if (strcmp (_NL_CURRENT (LC_TIME, PM_STR), HERE_PM_STR)) *decided = loc; is_pm = 1; break; } *decided = raw; } #endif if (!match_string (HERE_AM_STR, rp)) { if (match_string (HERE_PM_STR, rp)) { is_pm = 1; } else { return NULL; } } break; case 'r': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, T_FMT_AMPM))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, T_FMT_AMPM), HERE_T_FMT_AMPM)) *decided = loc; break; } *decided = raw; } #endif if (!recursive (HERE_T_FMT_AMPM)) return NULL; break; case 'R': if (!recursive ("%H:%M")) return NULL; break; case 's': { /* The number of seconds may be very high so we cannot use the `get_number' macro. Instead read the number character for character and construct the result while doing this. */ time_t secs = 0; if (*rp < '0' || *rp > '9') /* We need at least one digit. */ return NULL; do { secs *= 10; secs += *rp++ - '0'; } while (*rp >= '0' && *rp <= '9'); if (localtime_r (&secs, tm) == NULL) /* Error in function. */ return NULL; } break; case 'S': get_number (0, 61, 2); tm->tm_sec = val; break; case 'X': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, T_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (_NL_CURRENT (LC_TIME, T_FMT), HERE_T_FMT)) *decided = loc; break; } *decided = raw; } #endif FALL_THROUGH; case 'T': if (!recursive (HERE_T_FMT)) return NULL; break; case 'u': get_number (1, 7, 1); tm->tm_wday = val % 7; have_wday = 1; break; case 'g': get_number (0, 99, 2); /* XXX This cannot determine any field in TM. */ break; case 'G': if (*rp < '0' || *rp > '9') return NULL; /* XXX Ignore the number since we would need some more information to compute a real date. */ do ++rp; while (*rp >= '0' && *rp <= '9'); break; case 'U': case 'V': case 'W': get_number (0, 53, 2); /* XXX This cannot determine any field in TM without some information. */ break; case 'w': /* Match number of weekday. 
*/ get_number (0, 6, 1); tm->tm_wday = val; have_wday = 1; break; case 'y': #ifdef _NL_CURRENT match_year_in_century: #endif /* Match year within century. */ get_number (0, 99, 2); /* The "Year 2000: The Millennium Rollover" paper suggests that values in the range 69-99 refer to the twentieth century. */ tm->tm_year = val >= 69 ? val : val + 100; /* Indicate that we want to use the century, if specified. */ want_century = 1; want_xday = 1; break; case 'Y': /* Match year including century number. */ get_number (0, 9999, 4); tm->tm_year = val - 1900; want_century = 0; want_xday = 1; break; case 'Z': /* XXX How to handle this? */ break; case 'E': #ifdef _NL_CURRENT switch (*fmt++) { case 'c': /* Match locale's alternate date and time format. */ if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_T_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, D_T_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_D_T_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } /* The C locale has no era information, so use the normal representation. */ if (!recursive (HERE_D_T_FMT)) return NULL; want_xday = 1; break; case 'C': if (*decided != raw) { if (era_cnt >= 0) { era = _nl_select_era_entry (era_cnt); if (match_string (era->era_name, rp)) { *decided = loc; break; } else return NULL; } else { num_eras = _NL_CURRENT_WORD (LC_TIME, _NL_TIME_ERA_NUM_ENTRIES); for (era_cnt = 0; era_cnt < (int) num_eras; ++era_cnt, rp = rp_backup) { era = _nl_select_era_entry (era_cnt); if (match_string (era->era_name, rp)) { *decided = loc; break; } } if (era_cnt == (int) num_eras) { era_cnt = -1; if (*decided == loc) return NULL; } else break; } *decided = raw; } /* The C locale has no era information, so use the normal representation. */ goto match_century; case 'y': if (*decided == raw) goto match_year_in_century; get_number(0, 9999, 4); tm->tm_year = val; want_era = 1; want_xday = 1; break; case 'Y': if (*decided != raw) { num_eras = _NL_CURRENT_WORD (LC_TIME, _NL_TIME_ERA_NUM_ENTRIES); for (era_cnt = 0; era_cnt < (int) num_eras; ++era_cnt, rp = rp_backup) { era = _nl_select_era_entry (era_cnt); if (recursive (era->era_format)) break; } if (era_cnt == (int) num_eras) { era_cnt = -1; if (*decided == loc) return NULL; else rp = rp_backup; } else { *decided = loc; era_cnt = -1; break; } *decided = raw; } get_number (0, 9999, 4); tm->tm_year = val - 1900; want_century = 0; want_xday = 1; break; case 'x': if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, D_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_D_FMT)) *decided = loc; break; } *decided = raw; } if (!recursive (HERE_D_FMT)) return NULL; break; case 'X': if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_T_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, T_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_T_FMT)) *decided = loc; break; } *decided = raw; } if (!recursive (HERE_T_FMT)) return NULL; break; default: return NULL; } break; #else /* We have no information about the era format. Just use the normal format. */ if (*fmt != 'c' && *fmt != 'C' && *fmt != 'y' && *fmt != 'Y' && *fmt != 'x' && *fmt != 'X') /* This is an illegal format. 
*/ return NULL; goto start_over; #endif case 'O': switch (*fmt++) { case 'd': case 'e': /* Match day of month using alternate numeric symbols. */ get_alt_number (1, 31, 2); tm->tm_mday = val; have_mday = 1; want_xday = 1; break; case 'H': /* Match hour in 24-hour clock using alternate numeric symbols. */ get_alt_number (0, 23, 2); tm->tm_hour = val; have_I = 0; break; case 'I': /* Match hour in 12-hour clock using alternate numeric symbols. */ get_alt_number (1, 12, 2); tm->tm_hour = val - 1; have_I = 1; break; case 'm': /* Match month using alternate numeric symbols. */ get_alt_number (1, 12, 2); tm->tm_mon = val - 1; have_mon = 1; want_xday = 1; break; case 'M': /* Match minutes using alternate numeric symbols. */ get_alt_number (0, 59, 2); tm->tm_min = val; break; case 'S': /* Match seconds using alternate numeric symbols. */ get_alt_number (0, 61, 2); tm->tm_sec = val; break; case 'U': case 'V': case 'W': get_alt_number (0, 53, 2); /* XXX This cannot determine any field in TM without further information. */ break; case 'w': /* Match number of weekday using alternate numeric symbols. */ get_alt_number (0, 6, 1); tm->tm_wday = val; have_wday = 1; break; case 'y': /* Match year within century using alternate numeric symbols. */ get_alt_number (0, 99, 2); tm->tm_year = val >= 69 ? val : val + 100; want_xday = 1; break; default: return NULL; } break; default: return NULL; } } if (have_I && is_pm) tm->tm_hour += 12; if (century != -1) { if (want_century) tm->tm_year = tm->tm_year % 100 + (century - 19) * 100; else /* Only the century, but not the year. Strange, but so be it. */ tm->tm_year = (century - 19) * 100; } #ifdef _NL_CURRENT if (era_cnt != -1) { era = _nl_select_era_entry(era_cnt); if (want_era) tm->tm_year = (era->start_date[0] + ((tm->tm_year - era->offset) * era->absolute_direction)); else /* Era start year assumed. */ tm->tm_year = era->start_date[0]; } else #endif if (want_era) return NULL; if (want_xday && !have_wday) { if ( !(have_mon && have_mday) && have_yday) { /* We don't have tm_mon and/or tm_mday, compute them. */ int t_mon = 0; while (__mon_yday[__isleap(1900 + tm->tm_year)][t_mon] <= tm->tm_yday) t_mon++; if (!have_mon) tm->tm_mon = t_mon - 1; if (!have_mday) tm->tm_mday = (tm->tm_yday - __mon_yday[__isleap(1900 + tm->tm_year)][t_mon - 1] + 1); } day_of_the_week (tm); } if (want_xday && !have_yday) day_of_the_year (tm); return discard_const_p(char, rp); } char *rep_strptime(const char *buf, const char *format, struct tm *tm) { enum locale_status decided; #ifdef _NL_CURRENT decided = not; #else decided = raw; #endif return strptime_internal (buf, format, tm, &decided, -1); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/README0000660000000000000000000000036200000000000017421 0ustar00rootroot00000000000000This directory contains wrappers around logical groups of system include files. The idea is to avoid #ifdef blocks in the main code, and instead put all the necessary conditional includes in subsystem specific header files in this directory. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/capability.h0000660000000000000000000000322600000000000021035 0ustar00rootroot00000000000000#ifndef _system_capability_h #define _system_capability_h /* Unix SMB/CIFS implementation. capability system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! 
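(Stepping back to the strptime replacement that ends just above: a
minimal usage sketch, with the literal date string and the expected
field values worked out only for illustration.

   struct tm tm = {0};
   const char *rest = rep_strptime("2021-07-08 09:10:11",
                                   "%Y-%m-%d %H:%M:%S", &tm);

On success rest points just past the parsed text and the broken-down
time holds tm_year == 121 (years since 1900), tm_mon == 6 (July,
months are 0-based), tm_mday == 8, tm_hour == 9, tm_min == 10 and
tm_sec == 11; on a mismatch rep_strptime() returns NULL.)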
The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_SYS_CAPABILITY_H #if defined(BROKEN_REDHAT_7_SYSTEM_HEADERS) && !defined(_I386_STATFS_H) && !defined(_PPC_STATFS_H) #define _I386_STATFS_H #define _PPC_STATFS_H #define BROKEN_REDHAT_7_STATFS_WORKAROUND #endif #if defined(BROKEN_RHEL5_SYS_CAP_HEADER) && !defined(_LINUX_TYPES_H) #define BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #endif #ifdef HAVE_POSIX_CAPABILITIES #include #endif #ifdef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #undef _LINUX_TYPES_H #undef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #endif #ifdef BROKEN_REDHAT_7_STATFS_WORKAROUND #undef _PPC_STATFS_H #undef _I386_STATFS_H #undef BROKEN_REDHAT_7_STATFS_WORKAROUND #endif #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/dir.h0000660000000000000000000000352500000000000017474 0ustar00rootroot00000000000000#ifndef _system_dir_h #define _system_dir_h /* Unix SMB/CIFS implementation. directory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_DIRENT_H # include # define NAMLEN(dirent) strlen((dirent)->d_name) #else # define dirent direct # define NAMLEN(dirent) (dirent)->d_namlen # if HAVE_SYS_NDIR_H # include # endif # if HAVE_SYS_DIR_H # include # endif # if HAVE_NDIR_H # include # endif #endif #ifndef HAVE_MKDIR_MODE #define mkdir(dir, mode) mkdir(dir) #endif #ifdef HAVE_LIBGEN_H # include #endif /* Test whether a file name is the "." or ".." directory entries. * These really should be inline functions. */ #ifndef ISDOT #define ISDOT(path) ( \ *((const char *)(path)) == '.' && \ *(((const char *)(path)) + 1) == '\0' \ ) #endif #ifndef ISDOTDOT #define ISDOTDOT(path) ( \ *((const char *)(path)) == '.' && \ *(((const char *)(path)) + 1) == '.' 
&& \ *(((const char *)(path)) + 2) == '\0' \ ) #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/replace/system/filesys.h0000660000000000000000000001365000000000000020374 0ustar00rootroot00000000000000#ifndef _system_filesys_h #define _system_filesys_h /* Unix SMB/CIFS implementation. filesystem system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_UNISTD_H #include #endif #include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif #ifdef HAVE_MNTENT_H #include #endif #ifdef HAVE_SYS_VFS_H #include #endif #ifdef HAVE_SYS_ACL_H #include #endif #ifdef HAVE_ACL_LIBACL_H #include #endif #ifdef HAVE_SYS_FS_S5PARAM_H #include #endif #if defined (HAVE_SYS_FILSYS_H) && !defined (_CRAY) #include #endif #ifdef HAVE_SYS_STATFS_H # include #endif #ifdef HAVE_DUSTAT_H #include #endif #ifdef HAVE_SYS_STATVFS_H #include #endif #ifdef HAVE_SYS_FILIO_H #include #endif #ifdef HAVE_SYS_FILE_H #include #endif #ifdef HAVE_FCNTL_H #include #else #ifdef HAVE_SYS_FCNTL_H #include #endif #endif #ifdef HAVE_SYS_MODE_H /* apparently AIX needs this for S_ISLNK */ #ifndef S_ISLNK #include #endif #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #ifdef HAVE_SYS_UIO_H #include #endif /* mutually exclusive (SuSE 8.2) */ #if defined(HAVE_SYS_XATTR_H) #include #elif defined(HAVE_ATTR_XATTR_H) #include #elif defined(HAVE_SYS_ATTRIBUTES_H) #include #elif defined(HAVE_ATTR_ATTRIBUTES_H) #include #endif #ifdef HAVE_SYS_EA_H #include #endif #ifdef HAVE_SYS_EXTATTR_H #include #endif #ifdef HAVE_SYS_RESOURCE_H #include #endif #ifndef XATTR_CREATE #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ #endif #ifndef XATTR_REPLACE #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ #endif /* Some POSIX definitions for those without */ #ifndef S_IFDIR #define S_IFDIR 0x4000 #endif #ifndef S_ISDIR #define S_ISDIR(mode) ((mode & 0xF000) == S_IFDIR) #endif #ifndef S_IRWXU #define S_IRWXU 00700 /* read, write, execute: owner */ #endif #ifndef S_IRUSR #define S_IRUSR 00400 /* read permission: owner */ #endif #ifndef S_IWUSR #define S_IWUSR 00200 /* write permission: owner */ #endif #ifndef S_IXUSR #define S_IXUSR 00100 /* execute permission: owner */ #endif #ifndef S_IRWXG #define S_IRWXG 00070 /* read, write, execute: group */ #endif #ifndef S_IRGRP #define S_IRGRP 00040 /* read permission: group */ #endif #ifndef S_IWGRP #define S_IWGRP 00020 /* write permission: group */ #endif #ifndef S_IXGRP #define S_IXGRP 00010 /* execute permission: group */ #endif #ifndef S_IRWXO #define S_IRWXO 00007 /* read, write, execute: other */ #endif #ifndef S_IROTH #define S_IROTH 00004 /* read permission: other */ #endif #ifndef S_IWOTH #define S_IWOTH 
00002 /* write permission: other */ #endif #ifndef S_IXOTH #define S_IXOTH 00001 /* execute permission: other */ #endif #ifndef O_ACCMODE #define O_ACCMODE (O_RDONLY | O_WRONLY | O_RDWR) #endif #ifndef MAXPATHLEN #define MAXPATHLEN 256 #endif #ifndef SEEK_SET #define SEEK_SET 0 #endif #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif #ifdef DISABLE_OPATH #undef O_PATH #endif /* this allows us to use a uniform error handling for our xattr wrappers */ #ifndef ENOATTR #define ENOATTR ENODATA #endif #if !defined(HAVE_XATTR_XATTR) || defined(XATTR_ADDITIONAL_OPTIONS) ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size); #define getxattr(path, name, value, size) rep_getxattr(path, name, value, size) /* define is in "replace.h" */ ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size); #define fgetxattr(filedes, name, value, size) rep_fgetxattr(filedes, name, value, size) /* define is in "replace.h" */ ssize_t rep_listxattr (const char *path, char *list, size_t size); #define listxattr(path, list, size) rep_listxattr(path, list, size) /* define is in "replace.h" */ ssize_t rep_flistxattr (int filedes, char *list, size_t size); #define flistxattr(filedes, value, size) rep_flistxattr(filedes, value, size) /* define is in "replace.h" */ int rep_removexattr (const char *path, const char *name); #define removexattr(path, name) rep_removexattr(path, name) /* define is in "replace.h" */ int rep_fremovexattr (int filedes, const char *name); #define fremovexattr(filedes, name) rep_fremovexattr(filedes, name) /* define is in "replace.h" */ int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags); #define setxattr(path, name, value, size, flags) rep_setxattr(path, name, value, size, flags) /* define is in "replace.h" */ int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags); #define fsetxattr(filedes, name, value, size, flags) rep_fsetxattr(filedes, name, value, size, flags) /* define is in "replace.h" */ #endif /* !defined(HAVE_XATTR_XATTR) || defined(XATTR_ADDITIONAL_OPTIONS) */ #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/glob.h0000660000000000000000000000207700000000000017642 0ustar00rootroot00000000000000#ifndef _system_glob_h #define _system_glob_h /* Unix SMB/CIFS implementation. glob system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_GLOB_H #include #endif #ifdef HAVE_FNMATCH_H #include #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/gssapi.h0000660000000000000000000000272500000000000020205 0ustar00rootroot00000000000000#ifndef _system_gssapi_h #define _system_gssapi_h /* Unix SMB/CIFS implementation. GSSAPI system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_GSSAPI #ifdef HAVE_GSSAPI_GSSAPI_EXT_H #include #elif defined(HAVE_GSSAPI_GSSAPI_H) #include #elif defined(HAVE_GSSAPI_GSSAPI_GENERIC_H) #include #elif defined(HAVE_GSSAPI_H) #include #endif #ifdef HAVE_GSSAPI_GSSAPI_KRB5_H #include #endif #ifdef HAVE_GSSAPI_GSSAPI_SPNEGO_H #include #elif defined(HAVE_GSSAPI_SPNEGO_H) #include #endif #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/iconv.h0000660000000000000000000000304600000000000020032 0ustar00rootroot00000000000000#ifndef _system_iconv_h #define _system_iconv_h /* Unix SMB/CIFS implementation. iconv memory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #if !defined(HAVE_ICONV) && defined(HAVE_ICONV_H) #define HAVE_ICONV #endif #if !defined(HAVE_GICONV) && defined(HAVE_GICONV_H) #define HAVE_GICONV #endif #if !defined(HAVE_BICONV) && defined(HAVE_BICONV_H) #define HAVE_BICONV #endif #ifdef HAVE_NATIVE_ICONV #if defined(HAVE_ICONV) #include #elif defined(HAVE_GICONV) #include #elif defined(HAVE_BICONV) #include #endif #endif /* HAVE_NATIVE_ICONV */ /* needed for some systems without iconv. 
Doesn't really matter what error code we use */ #ifndef EILSEQ #define EILSEQ EIO #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/kerberos.h0000660000000000000000000000213700000000000020530 0ustar00rootroot00000000000000#ifndef _system_kerberos_h #define _system_kerberos_h /* Unix SMB/CIFS implementation. kerberos system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_KRB5 #ifdef HAVE_KRB5_H #include #endif #ifdef HAVE_COM_ERR_H #include #endif #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/locale.h0000660000000000000000000000216400000000000020153 0ustar00rootroot00000000000000#ifndef _system_locale_h #define _system_locale_h /* Unix SMB/CIFS implementation. locale include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_CTYPE_H #include #endif #ifdef HAVE_LOCALE_H #include #endif #ifdef HAVE_LANGINFO_H #include #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/replace/system/network.h0000660000000000000000000001745300000000000020414 0ustar00rootroot00000000000000#ifndef _system_network_h #define _system_network_h /* Unix SMB/CIFS implementation. networking system include wrappers Copyright (C) Andrew Tridgell 2004 Copyright (C) Jelmer Vernooij 2007 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef LIBREPLACE_NETWORK_CHECKS #error "AC_LIBREPLACE_NETWORK_CHECKS missing in configure" #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_SOCKET_H #include #endif #ifdef HAVE_UNIXSOCKET #include #endif #ifdef HAVE_NETINET_IN_H #include #endif #ifdef HAVE_ARPA_INET_H #include #endif #ifdef HAVE_NETDB_H #include #endif #ifdef HAVE_NETINET_TCP_H #include #endif /* * The next three defines are needed to access the IPTOS_* options * on some systems. */ #ifdef HAVE_NETINET_IN_SYSTM_H #include #endif #ifdef HAVE_NETINET_IN_IP_H #include #endif #ifdef HAVE_NETINET_IP_H #include #endif #ifdef HAVE_NET_IF_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #ifdef HAVE_SYS_UIO_H #include #endif #ifdef HAVE_STROPTS_H #include #endif #ifndef HAVE_SOCKLEN_T #define HAVE_SOCKLEN_T typedef int socklen_t; #endif #if !defined (HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) /* define is in "replace.h" */ char *rep_inet_ntoa(struct in_addr ip); #endif #ifndef HAVE_INET_PTON /* define is in "replace.h" */ int rep_inet_pton(int af, const char *src, void *dst); #endif #ifndef HAVE_INET_NTOP /* define is in "replace.h" */ const char *rep_inet_ntop(int af, const void *src, char *dst, socklen_t size); #endif #ifndef HAVE_INET_ATON /* define is in "replace.h" */ int rep_inet_aton(const char *src, struct in_addr *dst); #endif #ifndef HAVE_CONNECT /* define is in "replace.h" */ int rep_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen); #endif #ifndef HAVE_GETHOSTBYNAME /* define is in "replace.h" */ struct hostent *rep_gethostbyname(const char *name); #endif #ifdef HAVE_IFADDRS_H #include #endif #ifndef HAVE_STRUCT_IFADDRS struct ifaddrs { struct ifaddrs *ifa_next; /* Pointer to next struct */ char *ifa_name; /* Interface name */ unsigned int ifa_flags; /* Interface flags */ struct sockaddr *ifa_addr; /* Interface address */ struct sockaddr *ifa_netmask; /* Interface netmask */ #undef ifa_dstaddr struct sockaddr *ifa_dstaddr; /* P2P interface destination */ void *ifa_data; /* Address specific data */ }; #endif #ifndef HAVE_GETIFADDRS int rep_getifaddrs(struct ifaddrs **); #endif #ifndef HAVE_FREEIFADDRS void rep_freeifaddrs(struct ifaddrs *); #endif #ifndef HAVE_SOCKETPAIR /* define is in "replace.h" */ int rep_socketpair(int d, int type, int protocol, int sv[2]); #endif /* * Some systems have getaddrinfo but not the * defines needed to use it. */ /* Various macros that ought to be in , but might not be */ #ifndef EAI_FAIL #define EAI_BADFLAGS (-1) #define EAI_NONAME (-2) #define EAI_AGAIN (-3) #define EAI_FAIL (-4) #define EAI_FAMILY (-6) #define EAI_SOCKTYPE (-7) #define EAI_SERVICE (-8) #define EAI_MEMORY (-10) #define EAI_SYSTEM (-11) #endif /* !EAI_FAIL */ #ifndef AI_PASSIVE #define AI_PASSIVE 0x0001 #endif #ifndef AI_CANONNAME #define AI_CANONNAME 0x0002 #endif #ifndef AI_NUMERICHOST /* * some platforms don't support AI_NUMERICHOST; define as zero if using * the system version of getaddrinfo... */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_NUMERICHOST 0 #else #define AI_NUMERICHOST 0x0004 #endif #endif /* * Some of the functions in source3/lib/util_sock.c use AI_ADDRCONFIG. 
On QNX * 6.3.0, this macro is defined but, if it's used, getaddrinfo will fail. This * prevents smbd from opening any sockets. * * If I undefine AI_ADDRCONFIG on such systems and define it to be 0, * this works around the issue. */ #ifdef __QNX__ #include #if _NTO_VERSION == 630 #undef AI_ADDRCONFIG #endif #endif #ifndef AI_ADDRCONFIG /* * logic copied from AI_NUMERICHOST */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_ADDRCONFIG 0 #else #define AI_ADDRCONFIG 0x0020 #endif #endif #ifndef AI_NUMERICSERV /* * logic copied from AI_NUMERICHOST */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_NUMERICSERV 0 #else #define AI_NUMERICSERV 0x0400 #endif #endif #ifndef NI_NUMERICHOST #define NI_NUMERICHOST 1 #endif #ifndef NI_NUMERICSERV #define NI_NUMERICSERV 2 #endif #ifndef NI_NOFQDN #define NI_NOFQDN 4 #endif #ifndef NI_NAMEREQD #define NI_NAMEREQD 8 #endif #ifndef NI_DGRAM #define NI_DGRAM 16 #endif #ifndef NI_MAXHOST #define NI_MAXHOST 1025 #endif #ifndef NI_MAXSERV #define NI_MAXSERV 32 #endif /* * glibc on linux doesn't seem to have MSG_WAITALL * defined. I think the kernel has it though.. */ #ifndef MSG_WAITALL #define MSG_WAITALL 0 #endif #ifndef INADDR_LOOPBACK #define INADDR_LOOPBACK 0x7f000001 #endif #ifndef INADDR_NONE #define INADDR_NONE 0xffffffff #endif #ifndef EAFNOSUPPORT #define EAFNOSUPPORT EINVAL #endif #ifndef INET_ADDRSTRLEN #define INET_ADDRSTRLEN 16 #endif #ifndef INET6_ADDRSTRLEN #define INET6_ADDRSTRLEN 46 #endif #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN HOST_NAME_MAX #endif #ifndef HAVE_SA_FAMILY_T #define HAVE_SA_FAMILY_T typedef unsigned short int sa_family_t; #endif #ifndef HAVE_STRUCT_SOCKADDR_STORAGE #define HAVE_STRUCT_SOCKADDR_STORAGE #ifdef HAVE_STRUCT_SOCKADDR_IN6 #define sockaddr_storage sockaddr_in6 #define ss_family sin6_family #define HAVE_SS_FAMILY 1 #else /*HAVE_STRUCT_SOCKADDR_IN6*/ #define sockaddr_storage sockaddr_in #define ss_family sin_family #define HAVE_SS_FAMILY 1 #endif /*HAVE_STRUCT_SOCKADDR_IN6*/ #endif /*HAVE_STRUCT_SOCKADDR_STORAGE*/ #ifndef HAVE_SS_FAMILY #ifdef HAVE___SS_FAMILY #define ss_family __ss_family #define HAVE_SS_FAMILY 1 #endif #endif #ifndef IOV_MAX # ifdef UIO_MAXIOV # define IOV_MAX UIO_MAXIOV # else # ifdef __sgi /* * IRIX 6.5 has sysconf(_SC_IOV_MAX) * which might return 512 or bigger */ # define IOV_MAX 512 # endif # ifdef __GNU__ /* * GNU/Hurd does not have such hardcoded limitations. Use a reasonable * amount. */ # define IOV_MAX 512 # endif # endif #endif #ifndef HAVE_STRUCT_ADDRINFO #define HAVE_STRUCT_ADDRINFO struct addrinfo { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; socklen_t ai_addrlen; struct sockaddr *ai_addr; char *ai_canonname; struct addrinfo *ai_next; }; #endif /* HAVE_STRUCT_ADDRINFO */ #if !defined(HAVE_GETADDRINFO) #include "getaddrinfo.h" #endif /* Needed for some systems that don't define it (Solaris). 
*/ #ifndef ifr_netmask #define ifr_netmask ifr_addr #endif /* Some old Linux systems have broken header files */ #ifdef HAVE_IPV6 #ifdef HAVE_LINUX_IPV6_V6ONLY_26 #define IPV6_V6ONLY 26 #endif /* HAVE_LINUX_IPV6_V6ONLY_26 */ #endif /* HAVE_IPV6 */ #ifndef SCOPE_DELIMITER #define SCOPE_DELIMITER '%' #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/passwd.h0000660000000000000000000000404200000000000020212 0ustar00rootroot00000000000000#ifndef _system_passwd_h #define _system_passwd_h /* Unix SMB/CIFS implementation. passwd system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_PWD_H #include #endif #ifdef HAVE_GRP_H #include #endif #ifdef HAVE_SYS_PRIV_H #include #endif #ifdef HAVE_SYS_ID_H #include #endif #ifdef HAVE_CRYPT_H #include #endif #ifdef HAVE_SHADOW_H #include #endif #ifdef HAVE_SYS_SECURITY_H #include #include #define PASSWORD_LENGTH 16 #endif /* HAVE_SYS_SECURITY_H */ #ifdef HAVE_GETPWANAM #include #include #include #endif #ifdef HAVE_COMPAT_H #include #endif #ifndef NGROUPS_MAX #define NGROUPS_MAX 32 /* Guess... */ #endif /* what is the longest significant password available on your system? Knowing this speeds up password searches a lot */ #ifndef PASSWORD_LENGTH #define PASSWORD_LENGTH 8 #endif #ifndef ALLOW_CHANGE_PASSWORD #if (defined(HAVE_TERMIOS_H) && defined(HAVE_DUP2) && defined(HAVE_SETSID)) #define ALLOW_CHANGE_PASSWORD 1 #endif #endif #if defined(HAVE_CRYPT16) && defined(HAVE_GETAUTHUID) #define ULTRIX_AUTH 1 #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/readline.h0000660000000000000000000000335000000000000020475 0ustar00rootroot00000000000000#ifndef _system_readline_h #define _system_readline_h /* Unix SMB/CIFS implementation. Readline wrappers ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_LIBREADLINE # ifdef HAVE_READLINE_READLINE_H # ifdef HAVE_READLINE_READLINE_WORKAROUND # define _FUNCTION_DEF # endif # include # ifdef HAVE_READLINE_HISTORY_H # include # endif # else # ifdef HAVE_READLINE_H # include # ifdef HAVE_HISTORY_H # include # endif # else # undef HAVE_LIBREADLINE # endif # endif #endif #ifdef HAVE_NEW_LIBREADLINE #ifdef HAVE_CPPFUNCTION # define RL_COMPLETION_CAST (CPPFunction *) #elif defined(HAVE_RL_COMPLETION_T) # define RL_COMPLETION_CAST (rl_completion_t *) #else # define RL_COMPLETION_CAST #endif #else /* This type is missing from libreadline<4.0 (approximately) */ # define RL_COMPLETION_CAST #endif /* HAVE_NEW_LIBREADLINE */ #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/select.h0000660000000000000000000000462200000000000020174 0ustar00rootroot00000000000000#ifndef _system_select_h #define _system_select_h /* Unix SMB/CIFS implementation. select system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_SYS_SELECT_H #include #endif #ifdef HAVE_SYS_EPOLL_H #include #endif #ifdef HAVE_SOLARIS_PORTS #include #endif #ifndef SELECT_CAST #define SELECT_CAST #endif #ifdef HAVE_POLL #include #else /* Type used for the number of file descriptors. */ typedef unsigned long int nfds_t; /* Data structure describing a polling request. */ struct pollfd { int fd; /* File descriptor to poll. */ short int events; /* Types of events poller cares about. */ short int revents; /* Types of events that actually occurred. */ }; /* Event types that can be polled for. These bits may be set in `events' to indicate the interesting event types; they will appear in `revents' to indicate the status of the file descriptor. */ #define POLLIN 0x001 /* There is data to read. */ #define POLLPRI 0x002 /* There is urgent data to read. */ #define POLLOUT 0x004 /* Writing now will not block. */ #define POLLRDNORM 0x040 /* Normal data may be read. */ #define POLLRDBAND 0x080 /* Priority data may be read. */ #define POLLWRNORM 0x100 /* Writing now will not block. */ #define POLLWRBAND 0x200 /* Priority data may be written. */ #define POLLERR 0x008 /* Error condition. */ #define POLLHUP 0x010 /* Hung up. */ #define POLLNVAL 0x020 /* Invalid polling request. */ /* define is in "replace.h" */ int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout); #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/shmem.h0000660000000000000000000000262500000000000020027 0ustar00rootroot00000000000000#ifndef _system_shmem_h #define _system_shmem_h /* Unix SMB/CIFS implementation. 
shared memory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #if defined(HAVE_SYS_IPC_H) #include #endif /* HAVE_SYS_IPC_H */ #if defined(HAVE_SYS_SHM_H) #include #endif /* HAVE_SYS_SHM_H */ #ifdef HAVE_SYS_MMAN_H #include #endif /* NetBSD doesn't have these */ #ifndef SHM_R #define SHM_R 0400 #endif #ifndef SHM_W #define SHM_W 0200 #endif #ifndef MAP_FILE #define MAP_FILE 0 #endif #ifndef MAP_FAILED #define MAP_FAILED ((void *)-1) #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/syslog.h0000660000000000000000000000343500000000000020236 0ustar00rootroot00000000000000#ifndef _system_syslog_h #define _system_syslog_h /* Unix SMB/CIFS implementation. syslog system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_SYSLOG_H #include #else #ifdef HAVE_SYS_SYSLOG_H #include #endif #endif /* For sys_adminlog(). */ #ifndef LOG_EMERG #define LOG_EMERG 0 /* system is unusable */ #endif #ifndef LOG_ALERT #define LOG_ALERT 1 /* action must be taken immediately */ #endif #ifndef LOG_CRIT #define LOG_CRIT 2 /* critical conditions */ #endif #ifndef LOG_ERR #define LOG_ERR 3 /* error conditions */ #endif #ifndef LOG_WARNING #define LOG_WARNING 4 /* warning conditions */ #endif #ifndef LOG_NOTICE #define LOG_NOTICE 5 /* normal but significant condition */ #endif #ifndef LOG_INFO #define LOG_INFO 6 /* informational */ #endif #ifndef LOG_DEBUG #define LOG_DEBUG 7 /* debug-level messages */ #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/terminal.h0000660000000000000000000000262500000000000020531 0ustar00rootroot00000000000000#ifndef _system_terminal_h #define _system_terminal_h /* Unix SMB/CIFS implementation. terminal system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef SUNOS4 /* on SUNOS4 termios.h conflicts with sys/ioctl.h */ #undef HAVE_TERMIOS_H #endif #if defined(HAVE_TERMIOS_H) /* POSIX terminal handling. */ #include #elif defined(HAVE_TERMIO_H) /* Older SYSV terminal handling - don't use if we can avoid it. */ #include #elif defined(HAVE_SYS_TERMIO_H) /* Older SYSV terminal handling - don't use if we can avoid it. */ #include #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/threads.h0000660000000000000000000000465300000000000020353 0ustar00rootroot00000000000000#ifndef _system_threads_h #define _system_threads_h /* Unix SMB/CIFS implementation. macros to go along with the lib/replace/ portability layer code Copyright (C) Volker Lendecke 2012 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include #if defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP) && \ !defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST) #define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np #endif #if defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP) && \ !defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST) #define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP #endif #if defined(HAVE_PTHREAD_MUTEX_CONSISTENT_NP) && \ !defined(HAVE_PTHREAD_MUTEX_CONSISTENT) #define pthread_mutex_consistent pthread_mutex_consistent_np #endif #ifdef HAVE_STDATOMIC_H #include #endif #ifndef HAVE_ATOMIC_THREAD_FENCE #ifdef HAVE___ATOMIC_THREAD_FENCE #define atomic_thread_fence(__ignore_order) __atomic_thread_fence(__ATOMIC_SEQ_CST) #define HAVE_ATOMIC_THREAD_FENCE 1 #endif /* HAVE___ATOMIC_THREAD_FENCE */ #endif /* not HAVE_ATOMIC_THREAD_FENCE */ #ifndef HAVE_ATOMIC_THREAD_FENCE #ifdef HAVE___SYNC_SYNCHRONIZE #define atomic_thread_fence(__ignore_order) __sync_synchronize() #define HAVE_ATOMIC_THREAD_FENCE 1 #endif /* HAVE___SYNC_SYNCHRONIZE */ #endif /* not HAVE_ATOMIC_THREAD_FENCE */ #ifndef HAVE_ATOMIC_THREAD_FENCE #ifdef HAVE_ATOMIC_THREAD_FENCE_SUPPORT #error mismatch_error_between_configure_test_and_header #endif /* make sure the build fails if someone uses it without checking the define */ #define atomic_thread_fence(__order) \ __function__atomic_thread_fence_not_available_on_this_platform__() #endif /* not HAVE_ATOMIC_THREAD_FENCE */ #endif ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/replace/system/time.h0000660000000000000000000000543400000000000017655 0ustar00rootroot00000000000000#ifndef _system_time_h #define _system_time_h /* Unix SMB/CIFS implementation. time system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef TIME_WITH_SYS_TIME #include #include #else #ifdef HAVE_SYS_TIME_H #include #else #include #endif #endif #ifdef HAVE_UTIME_H #include #else struct utimbuf { time_t actime; /* access time */ time_t modtime; /* modification time */ }; #endif #ifndef HAVE_STRUCT_TIMESPEC struct timespec { time_t tv_sec; /* Seconds. */ long tv_nsec; /* Nanoseconds. 
*/ }; #endif #ifndef HAVE_MKTIME /* define is in "replace.h" */ time_t rep_mktime(struct tm *t); #endif #ifndef HAVE_TIMEGM /* define is in "replace.h" */ time_t rep_timegm(struct tm *tm); #endif #ifndef HAVE_UTIME /* define is in "replace.h" */ int rep_utime(const char *filename, const struct utimbuf *buf); #endif #ifndef HAVE_UTIMES /* define is in "replace.h" */ int rep_utimes(const char *filename, const struct timeval tv[2]); #endif #ifndef HAVE_CLOCK_GETTIME /* CLOCK_REALTIME is required by POSIX */ #define CLOCK_REALTIME 0 typedef int clockid_t; int rep_clock_gettime(clockid_t clk_id, struct timespec *tp); #endif /* make sure we have a best effort CUSTOM_CLOCK_MONOTONIC we can rely on. * * on AIX the values of CLOCK_* are cast expressions, not integer constants, * this prevents them from being compared against in a preprocessor directive. * The following ...IS_* macros can be used to check which clock is in use. */ #if defined(CLOCK_MONOTONIC) #define CUSTOM_CLOCK_MONOTONIC CLOCK_MONOTONIC #define CUSTOM_CLOCK_MONOTONIC_IS_MONOTONIC #elif defined(CLOCK_HIGHRES) #define CUSTOM_CLOCK_MONOTONIC CLOCK_HIGHRES #define CUSTOM_CLOCK_MONOTONIC_IS_HIGHRES #else #define CUSTOM_CLOCK_MONOTONIC CLOCK_REALTIME #define CUSTOM_CLOCK_MONOTONIC_IS_REALTIME #endif #ifndef UTIME_NOW #define UTIME_NOW ((1l << 30) - 1l) #endif #ifndef UTIME_OMIT #define UTIME_OMIT ((1l << 30) - 2l) #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/wait.h0000660000000000000000000000257200000000000017663 0ustar00rootroot00000000000000#ifndef _system_wait_h #define _system_wait_h /* Unix SMB/CIFS implementation. waitpid system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_SYS_WAIT_H #include #endif #include #ifndef SIGCLD #define SIGCLD SIGCHLD #endif #ifdef HAVE_SETJMP_H #include #endif #ifdef HAVE_SYS_UCONTEXT_H #include #endif #if !defined(HAVE_SIG_ATOMIC_T_TYPE) typedef int sig_atomic_t; #endif #if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) int rep_waitpid(pid_t pid,int *status,int options); #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/system/wscript_configure0000660000000000000000000000156400000000000022225 0ustar00rootroot00000000000000#!/usr/bin/env python # solaris varients of getXXent_r conf.CHECK_C_PROTOTYPE('getpwent_r', 'struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)', define='SOLARIS_GETPWENT_R', headers='pwd.h') conf.CHECK_C_PROTOTYPE('getgrent_r', 'struct group *getgrent_r(struct group *src, char *buf, int buflen)', define='SOLARIS_GETGRENT_R', headers='grp.h') # the irix varients conf.CHECK_C_PROTOTYPE('getpwent_r', 'struct passwd *getpwent_r(struct passwd *src, char *buf, size_t buflen)', define='SOLARIS_GETPWENT_R', headers='pwd.h') conf.CHECK_C_PROTOTYPE('getgrent_r', 'struct group *getgrent_r(struct group *src, char *buf, size_t buflen)', define='SOLARIS_GETGRENT_R', headers='grp.h') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/getifaddrs.c0000660000000000000000000000504500000000000020642 0ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * libreplace getifaddrs test * * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . 
*/ #ifndef AUTOCONF_TEST #include "replace.h" #include "system/network.h" #include "replace-test.h" #endif #ifdef HAVE_INET_NTOP #define rep_inet_ntop inet_ntop #endif static const char *format_sockaddr(struct sockaddr *addr, char *addrstring, socklen_t addrlen) { const char *result = NULL; if (addr->sa_family == AF_INET) { result = rep_inet_ntop(AF_INET, &((struct sockaddr_in *)addr)->sin_addr, addrstring, addrlen); #ifdef HAVE_STRUCT_SOCKADDR_IN6 } else if (addr->sa_family == AF_INET6) { result = rep_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)addr)->sin6_addr, addrstring, addrlen); #endif } return result; } int getifaddrs_test(void) { struct ifaddrs *ifs = NULL; struct ifaddrs *ifs_head = NULL; int ret; ret = getifaddrs(&ifs); ifs_head = ifs; if (ret != 0) { fprintf(stderr, "getifaddrs() failed: %s\n", strerror(errno)); return 1; } while (ifs) { printf("%-10s ", ifs->ifa_name); if (ifs->ifa_addr != NULL) { char addrstring[INET6_ADDRSTRLEN]; const char *result; result = format_sockaddr(ifs->ifa_addr, addrstring, sizeof(addrstring)); if (result != NULL) { printf("IP=%s ", addrstring); } if (ifs->ifa_netmask != NULL) { result = format_sockaddr(ifs->ifa_netmask, addrstring, sizeof(addrstring)); if (result != NULL) { printf("NETMASK=%s", addrstring); } } else { printf("AF=%d ", ifs->ifa_addr->sa_family); } } else { printf(""); } printf("\n"); ifs = ifs->ifa_next; } freeifaddrs(ifs_head); return 0; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/incoherent_mmap.c0000660000000000000000000000345600000000000021702 0ustar00rootroot00000000000000/* In OpenBSD, if you write to a file, another process doesn't see it * in its mmap. Returns with exit status 0 if that is the case, 1 if * it's coherent, and other if there's a problem. */ #include #include #include #include #include #include #include #include #include #define DATA "coherent.mmap" int main(int argc, char *argv[]) { int tochild[2], toparent[2]; int fd; volatile unsigned char *map; unsigned char *page; const char *fname = argv[1]; char c = 0; if (pipe(tochild) != 0 || pipe(toparent) != 0) err(2, "Creating pipe"); if (!fname) fname = DATA; fd = open(fname, O_RDWR|O_CREAT|O_TRUNC, 0600); if (fd < 0) err(2, "opening %s", fname); unlink(fname); switch (fork()) { case -1: err(2, "Fork"); case 0: close(tochild[1]); close(toparent[0]); /* Wait for parent to create file. */ if (read(tochild[0], &c, 1) != 1) err(2, "reading from parent"); /* Alter first byte. */ pwrite(fd, &c, 1, 0); if (write(toparent[1], &c, 1) != 1) err(2, "writing to parent"); exit(0); default: close(tochild[0]); close(toparent[1]); /* Create a file and mmap it. */ page = malloc(getpagesize()); memset(page, 0x42, getpagesize()); if (write(fd, page, getpagesize()) != getpagesize()) err(2, "writing first page"); map = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (map == MAP_FAILED) err(2, "mapping file"); if (*map != 0x42) errx(2, "first byte isn't 0x42!"); /* Tell child to alter file. */ if (write(tochild[1], &c, 1) != 1) err(2, "writing to child"); if (read(toparent[0], &c, 1) != 1) err(2, "reading from child"); if (*map) errx(0, "mmap incoherent: first byte isn't 0."); exit(1); } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/main.c0000660000000000000000000000205000000000000017443 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. 
libreplace tests Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "replace-testsuite.h" int main(void) { bool ret = torture_local_replace(NULL); if (ret) return 0; return -1; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/replace/tests/os2_delete.c0000660000000000000000000000516000000000000020551 0ustar00rootroot00000000000000/* test readdir/unlink pattern that OS/2 uses tridge@samba.org July 2005 */ #include #include #include #include #include #include #include #include #include #include "replace-test.h" #define NUM_FILES 700 #define READDIR_SIZE 100 #define DELETE_SIZE 4 #define TESTDIR "test.dir" static int test_readdir_os2_delete_ret; #define FAILED(d) (printf("failure: readdir [\nFailed for %s - %d = %s\n]\n", d, errno, strerror(errno)), test_readdir_os2_delete_ret = 1) #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif static void cleanup(void) { /* I'm a lazy bastard */ if (system("rm -rf " TESTDIR)) { FAILED("system"); } mkdir(TESTDIR, 0700) == 0 || FAILED("mkdir"); } static void create_files(void) { int i; for (i=0;id_name); } if (i == 0) { return 0; } /* delete the first few */ for (j=0; jd_name, ".") == 0 || FAILED("match ."); de = readdir(d); strcmp(de->d_name, "..") == 0 || FAILED("match .."); while (1) { int n = os2_delete(d); if (n == 0) break; total_deleted += n; } closedir(d); fprintf(stderr, "Deleted %d files of %d\n", total_deleted, NUM_FILES); rmdir(TESTDIR) == 0 || FAILED("rmdir"); if (system("rm -rf " TESTDIR) == -1) { FAILED("system"); } return test_readdir_os2_delete_ret; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/shared_mmap.c0000660000000000000000000000231500000000000021003 0ustar00rootroot00000000000000/* this tests whether we can use a shared writeable mmap on a file - as needed for the mmap variant of FAST_SHARE_MODES */ #if defined(HAVE_UNISTD_H) #include #endif #ifdef HAVE_STDLIB_H #include #endif #include #include #include #include #define DATA "conftest.mmap" #ifndef MAP_FILE #define MAP_FILE 0 #endif int main(void) { int *buf; int i; int fd = open(DATA,O_RDWR|O_CREAT|O_TRUNC,0666); int count=7; if (fd == -1) exit(1); for (i=0;i<10000;i++) { write(fd,&i,sizeof(i)); } close(fd); if (fork() == 0) { fd = open(DATA,O_RDWR); if (fd == -1) exit(1); buf = (int *)mmap(NULL, 10000*sizeof(int), (PROT_READ | PROT_WRITE), MAP_FILE | MAP_SHARED, fd, 0); while (count-- && buf[9124] != 55732) sleep(1); if (count <= 0) exit(1); buf[1763] = 7268; exit(0); } fd = open(DATA,O_RDWR); if (fd == -1) exit(1); buf = (int *)mmap(NULL, 10000*sizeof(int), (PROT_READ | PROT_WRITE), MAP_FILE | 
MAP_SHARED, fd, 0); if (buf == (int *)-1) exit(1); buf[9124] = 55732; while (count-- && buf[1763] != 7268) sleep(1); unlink(DATA); if (count > 0) exit(0); exit(1); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/shared_mremap.c0000660000000000000000000000141700000000000021334 0ustar00rootroot00000000000000/* this tests whether we can use mremap */ #if defined(HAVE_UNISTD_H) #include #endif #ifdef HAVE_STDLIB_H #include #endif #include #include #include #include #define DATA "conftest.mmap" #ifndef MAP_FILE #define MAP_FILE 0 #endif #ifndef MAP_FAILED #define MAP_FAILED (int *)-1 #endif int main(void) { int *buf; int fd; int err = 1; fd = open(DATA, O_RDWR|O_CREAT|O_TRUNC, 0666); if (fd == -1) { exit(1); } buf = (int *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, 0); if (buf == MAP_FAILED) { goto done; } buf = mremap(buf, 0x1000, 0x2000, MREMAP_MAYMOVE); if (buf == MAP_FAILED) { goto done; } err = 0; done: close(fd); unlink(DATA); exit(err); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/snprintf.c0000660000000000000000000000135500000000000020371 0ustar00rootroot00000000000000void foo(const char *format, ...) { va_list ap; int len; char buf[20]; long long l = 1234567890; l *= 100; va_start(ap, format); len = vsnprintf(buf, 0, format, ap); va_end(ap); if (len != 5) exit(1); va_start(ap, format); len = vsnprintf(0, 0, format, ap); va_end(ap); if (len != 5) exit(2); if (snprintf(buf, 3, "hello") != 5 || strcmp(buf, "he") != 0) exit(3); if (snprintf(buf, 20, "%lld", l) != 12 || strcmp(buf, "123456789000") != 0) exit(4); if (snprintf(buf, 20, "%zu", 123456789) != 9 || strcmp(buf, "123456789") != 0) exit(5); if (snprintf(buf, 20, "%2\$d %1\$d", 3, 4) != 3 || strcmp(buf, "4 3") != 0) exit(6); if (snprintf(buf, 20, "%s", 0) < 3) exit(7); printf("1"); exit(0); } int main(void) { foo("hello"); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0052056 tevent-0.11.0/lib/replace/tests/strptime.c0000660000000000000000000000660600000000000020401 0ustar00rootroot00000000000000 #ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME #include #include #include #define true 1 #define false 0 #ifndef __STRING #define __STRING(x) #x #endif /* make printf a no-op */ #define printf if(0) printf #else /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ #include "replace.h" #include "system/time.h" #include "replace-test.h" #endif /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ int libreplace_test_strptime(void) { const char *s = "20070414101546Z"; char *ret; struct tm t, t2; memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2)); printf("test: strptime\n"); ret = strptime(s, "%Y%m%d%H%M%S", &t); if ( ret == NULL ) { printf("failure: strptime [\n" "returned NULL\n" "]\n"); return false; } if ( *ret != 'Z' ) { printf("failure: strptime [\n" "ret doesn't point to 'Z'\n" "]\n"); return false; } ret = strptime(s, "%Y%m%d%H%M%SZ", &t2); if ( ret == NULL ) { printf("failure: strptime [\n" "returned NULL with Z\n" "]\n"); return false; } if ( *ret != '\0' ) { printf("failure: strptime [\n" "ret doesn't point to '\\0'\n" "]\n"); return false; } #define CMP_TM_ELEMENT(t1,t2,elem) \ if (t1.elem != t2.elem) { \ printf("failure: strptime [\n" \ "result differs if the format string has a 'Z' at the end\n" \ "element: %s %d != %d\n" \ "]\n", \ __STRING(elen), t1.elem, t2.elem); \ 
return false; \ } CMP_TM_ELEMENT(t,t2,tm_sec); CMP_TM_ELEMENT(t,t2,tm_min); CMP_TM_ELEMENT(t,t2,tm_hour); CMP_TM_ELEMENT(t,t2,tm_mday); CMP_TM_ELEMENT(t,t2,tm_mon); CMP_TM_ELEMENT(t,t2,tm_year); CMP_TM_ELEMENT(t,t2,tm_wday); CMP_TM_ELEMENT(t,t2,tm_yday); CMP_TM_ELEMENT(t,t2,tm_isdst); if (t.tm_sec != 46) { printf("failure: strptime [\n" "tm_sec: expected: 46, got: %d\n" "]\n", t.tm_sec); return false; } if (t.tm_min != 15) { printf("failure: strptime [\n" "tm_min: expected: 15, got: %d\n" "]\n", t.tm_min); return false; } if (t.tm_hour != 10) { printf("failure: strptime [\n" "tm_hour: expected: 10, got: %d\n" "]\n", t.tm_hour); return false; } if (t.tm_mday != 14) { printf("failure: strptime [\n" "tm_mday: expected: 14, got: %d\n" "]\n", t.tm_mday); return false; } if (t.tm_mon != 3) { printf("failure: strptime [\n" "tm_mon: expected: 3, got: %d\n" "]\n", t.tm_mon); return false; } if (t.tm_year != 107) { printf("failure: strptime [\n" "tm_year: expected: 107, got: %d\n" "]\n", t.tm_year); return false; } if (t.tm_wday != 6) { /* saturday */ printf("failure: strptime [\n" "tm_wday: expected: 6, got: %d\n" "]\n", t.tm_wday); return false; } if (t.tm_yday != 103) { printf("failure: strptime [\n" "tm_yday: expected: 103, got: %d\n" "]\n", t.tm_yday); return false; } /* we don't test this as it depends on the host configuration if (t.tm_isdst != 0) { printf("failure: strptime [\n" "tm_isdst: expected: 0, got: %d\n" "]\n", t.tm_isdst); return false; }*/ printf("success: strptime\n"); return true; } #ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME int main (void) { int ret; ret = libreplace_test_strptime(); if (ret == false) return 1; return 0; } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/replace/tests/testsuite.c0000660000000000000000000007430700000000000020566 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. libreplace tests Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "replace-test.h" #include "replace-testsuite.h" /* we include all the system/ include files here so that libreplace tests them in the build farm */ #include "system/capability.h" #include "system/dir.h" #include "system/filesys.h" #include "system/glob.h" #include "system/iconv.h" #include "system/locale.h" #include "system/network.h" #include "system/passwd.h" #include "system/readline.h" #include "system/select.h" #include "system/shmem.h" #include "system/syslog.h" #include "system/terminal.h" #include "system/time.h" #include "system/wait.h" #define TESTFILE "testfile.dat" /* test ftruncate() function */ static int test_ftruncate(void) { struct stat st; int fd; const int size = 1234; printf("test: ftruncate\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: ftruncate [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (ftruncate(fd, size) != 0) { printf("failure: ftruncate [\n%s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st) != 0) { printf("failure: ftruncate [\nfstat failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (st.st_size != size) { printf("failure: ftruncate [\ngave wrong size %d - expected %d\n]\n", (int)st.st_size, size); close(fd); return false; } unlink(TESTFILE); printf("success: ftruncate\n"); close(fd); return true; } /* test strlcpy() function. see http://www.gratisoft.us/todd/papers/strlcpy.html */ static int test_strlcpy(void) { char buf[4]; const struct { const char *src; size_t result; } tests[] = { { "abc", 3 }, { "abcdef", 6 }, { "abcd", 4 }, { "", 0 }, { NULL, 0 } }; int i; printf("test: strlcpy\n"); for (i=0;tests[i].src;i++) { if (strlcpy(buf, tests[i].src, sizeof(buf)) != tests[i].result) { printf("failure: strlcpy [\ntest %d failed\n]\n", i); return false; } } printf("success: strlcpy\n"); return true; } static int test_strlcat(void) { char tmp[10]; printf("test: strlcat\n"); strlcpy(tmp, "", sizeof(tmp)); if (strlcat(tmp, "bla", 3) != 3) { printf("failure: strlcat [\ninvalid return code\n]\n"); return false; } if (strcmp(tmp, "bl") != 0) { printf("failure: strlcat [\nexpected \"bl\", got \"%s\"\n]\n", tmp); return false; } strlcpy(tmp, "da", sizeof(tmp)); if (strlcat(tmp, "me", 4) != 4) { printf("failure: strlcat [\nexpected \"dam\", got \"%s\"\n]\n", tmp); return false; } printf("success: strlcat\n"); return true; } static int test_mktime(void) { /* FIXME */ return true; } static int test_initgroups(void) { /* FIXME */ return true; } static int test_memmove(void) { /* FIXME */ return true; } static int test_strdup(void) { char *x; int cmp; printf("test: strdup\n"); x = strdup("bla"); cmp = strcmp("bla", x); if (cmp != 0) { printf("failure: strdup [\nfailed: expected \"bla\", got \"%s\"\n]\n", x); free(x); return false; } free(x); printf("success: strdup\n"); return true; } static int test_setlinebuf(void) { printf("test: setlinebuf\n"); setlinebuf(stdout); printf("success: setlinebuf\n"); return true; } static int test_vsyslog(void) { /* FIXME */ return true; } static int test_timegm(void) { /* FIXME */ return true; } static int test_setenv(void) { #define TEST_SETENV(key, value, overwrite, result) do { \ int _ret; \ char *_v; \ _ret = setenv(key, value, overwrite); \ if (_ret != 0) { \ printf("failure: setenv [\n" \ "setenv(%s, %s, %d) failed\n" \ "]\n", \ key, value, overwrite); \ return false; \ } \ _v=getenv(key); \ if (!_v) { \ printf("failure: setenv [\n" \ "getenv(%s) returned NULL\n" \ "]\n", \ 
key); \ return false; \ } \ if (strcmp(result, _v) != 0) { \ printf("failure: setenv [\n" \ "getenv(%s): '%s' != '%s'\n" \ "]\n", \ key, result, _v); \ return false; \ } \ } while(0) #define TEST_UNSETENV(key) do { \ char *_v; \ unsetenv(key); \ _v=getenv(key); \ if (_v) { \ printf("failure: setenv [\n" \ "getenv(%s): NULL != '%s'\n" \ "]\n", \ SETENVTEST_KEY, _v); \ return false; \ } \ } while (0) #define SETENVTEST_KEY "SETENVTESTKEY" #define SETENVTEST_VAL "SETENVTESTVAL" printf("test: setenv\n"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"1", 0, SETENVTEST_VAL"1"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"2", 0, SETENVTEST_VAL"1"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"3", 1, SETENVTEST_VAL"3"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"4", 1, SETENVTEST_VAL"4"); TEST_UNSETENV(SETENVTEST_KEY); TEST_UNSETENV(SETENVTEST_KEY); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"5", 0, SETENVTEST_VAL"5"); TEST_UNSETENV(SETENVTEST_KEY); TEST_UNSETENV(SETENVTEST_KEY); printf("success: setenv\n"); return true; } static int test_strndup(void) { char *x; int cmp; printf("test: strndup\n"); x = strndup("bla", 0); cmp = strcmp(x, ""); free(x); if (cmp != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } x = strndup("bla", 2); cmp = strcmp(x, "bl"); free(x); if (cmp != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } #ifdef __GNUC__ # if __GNUC__ < 11 /* * This code will not compile with gcc11 -O3 anymore. * * error: ‘strndup’ specified bound 10 exceeds source size 4 [-Werror=stringop-overread] * x = strndup("bla", 10); * ^~~~~~~~~~~~~~~~~~ */ x = strndup("bla", 10); cmp = strcmp(x, "bla"); free(x); if (cmp != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } # endif #endif /* __GNUC__ */ printf("success: strndup\n"); return true; } static int test_strnlen(void) { printf("test: strnlen\n"); if (strnlen("bla", 2) != 2) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } if (strnlen("some text\n", 0) != 0) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } if (strnlen("some text", 20) != 9) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } printf("success: strnlen\n"); return true; } static int test_waitpid(void) { /* FIXME */ return true; } static int test_seteuid(void) { /* FIXME */ return true; } static int test_setegid(void) { /* FIXME */ return true; } static int test_asprintf(void) { char *x = NULL; printf("test: asprintf\n"); if (asprintf(&x, "%d", 9) != 1) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); free(x); return false; } if (strcmp(x, "9") != 0) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); free(x); return false; } if (asprintf(&x, "dat%s", "a") != 4) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); free(x); return false; } if (strcmp(x, "data") != 0) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); free(x); return false; } free(x); printf("success: asprintf\n"); return true; } static int test_snprintf(void) { char tmp[10]; printf("test: snprintf\n"); if (snprintf(tmp, 3, "foo%d", 9) != 4) { printf("failure: snprintf [\nsnprintf return code failed\n]\n"); return false; } if (strcmp(tmp, "fo") != 0) { printf("failure: snprintf [\nsnprintf failed\n]\n"); return false; } printf("success: snprintf\n"); return true; } static int test_vasprintf(void) { /* FIXME */ return true; } static int test_vsnprintf(void) { /* FIXME */ return true; } static int test_opendir(void) { /* FIXME */ return true; } static int test_readdir(void) { printf("test: 
readdir\n"); if (test_readdir_os2_delete() != 0) { return false; } printf("success: readdir\n"); return true; } static int test_telldir(void) { /* FIXME */ return true; } static int test_seekdir(void) { /* FIXME */ return true; } static int test_dlopen(void) { /* FIXME: test dlopen, dlsym, dlclose, dlerror */ return true; } static int test_chroot(void) { /* FIXME: chroot() */ return true; } static int test_bzero(void) { /* FIXME: bzero */ return true; } static int test_strerror(void) { /* FIXME */ return true; } static int test_errno(void) { printf("test: errno\n"); errno = 3; if (errno != 3) { printf("failure: errno [\nerrno failed\n]\n"); return false; } printf("success: errno\n"); return true; } static int test_mkdtemp(void) { /* FIXME */ return true; } static int test_mkstemp(void) { /* FIXME */ return true; } static int test_pread(void) { /* FIXME */ return true; } static int test_pwrite(void) { /* FIXME */ return true; } static int test_inet_ntoa(void) { /* FIXME */ return true; } #define TEST_STRTO_X(type,fmt,func,str,base,res,diff,rrnoo) do {\ type _v; \ char _s[64]; \ char *_p = NULL;\ char *_ep = NULL; \ strlcpy(_s, str, sizeof(_s));\ if (diff >= 0) { \ _ep = &_s[diff]; \ } \ errno = 0; \ _v = func(_s, &_p, base); \ if (errno != rrnoo) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ "\terrno: %d != %d\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v, rrnoo, errno); \ return false; \ } else if (_v != res) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " != " fmt "\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v); \ return false; \ } else if (_p != _ep) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ "\tptr: %p - %p = %d != %d\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v, _ep, _p, (int)(diff - (_ep - _p)), diff); \ return false; \ } \ } while (0) static int test_strtoll(void) { printf("test: strtoll\n"); #define TEST_STRTOLL(str,base,res,diff,errnoo) TEST_STRTO_X(long long int, "%lld", strtoll,str,base,res,diff,errnoo) TEST_STRTOLL("15", 10, 15LL, 2, 0); TEST_STRTOLL(" 15", 10, 15LL, 4, 0); TEST_STRTOLL("15", 0, 15LL, 2, 0); TEST_STRTOLL(" 15 ", 0, 15LL, 3, 0); TEST_STRTOLL("+15", 10, 15LL, 3, 0); TEST_STRTOLL(" +15", 10, 15LL, 5, 0); TEST_STRTOLL("+15", 0, 15LL, 3, 0); TEST_STRTOLL(" +15 ", 0, 15LL, 4, 0); TEST_STRTOLL("-15", 10, -15LL, 3, 0); TEST_STRTOLL(" -15", 10, -15LL, 5, 0); TEST_STRTOLL("-15", 0, -15LL, 3, 0); TEST_STRTOLL(" -15 ", 0, -15LL, 4, 0); TEST_STRTOLL("015", 10, 15LL, 3, 0); TEST_STRTOLL(" 015", 10, 15LL, 5, 0); TEST_STRTOLL("015", 0, 13LL, 3, 0); TEST_STRTOLL(" 015", 0, 13LL, 5, 0); TEST_STRTOLL("0x15", 10, 0LL, 1, 0); TEST_STRTOLL(" 0x15", 10, 0LL, 3, 0); TEST_STRTOLL("0x15", 0, 21LL, 4, 0); TEST_STRTOLL(" 0x15", 0, 21LL, 6, 0); TEST_STRTOLL("10", 16, 16LL, 2, 0); TEST_STRTOLL(" 10 ", 16, 16LL, 4, 0); TEST_STRTOLL("0x10", 16, 16LL, 4, 0); TEST_STRTOLL("0x10", 0, 16LL, 4, 0); TEST_STRTOLL(" 0x10 ", 0, 16LL, 5, 0); TEST_STRTOLL("+10", 16, 16LL, 3, 0); TEST_STRTOLL(" +10 ", 16, 16LL, 5, 0); TEST_STRTOLL("+0x10", 16, 16LL, 5, 0); TEST_STRTOLL("+0x10", 0, 16LL, 5, 0); TEST_STRTOLL(" +0x10 ", 0, 16LL, 6, 0); TEST_STRTOLL("-10", 16, -16LL, 3, 0); TEST_STRTOLL(" -10 ", 16, -16LL, 5, 0); TEST_STRTOLL("-0x10", 16, -16LL, 5, 0); TEST_STRTOLL("-0x10", 0, -16LL, 5, 0); TEST_STRTOLL(" -0x10 ", 0, -16LL, 6, 0); TEST_STRTOLL("010", 16, 16LL, 3, 0); 
TEST_STRTOLL(" 010 ", 16, 16LL, 5, 0); TEST_STRTOLL("-010", 16, -16LL, 4, 0); TEST_STRTOLL("11", 8, 9LL, 2, 0); TEST_STRTOLL("011", 8, 9LL, 3, 0); TEST_STRTOLL("011", 0, 9LL, 3, 0); TEST_STRTOLL("-11", 8, -9LL, 3, 0); TEST_STRTOLL("-011", 8, -9LL, 4, 0); TEST_STRTOLL("-011", 0, -9LL, 4, 0); TEST_STRTOLL("011", 8, 9LL, 3, 0); TEST_STRTOLL("011", 0, 9LL, 3, 0); TEST_STRTOLL("-11", 8, -9LL, 3, 0); TEST_STRTOLL("-011", 8, -9LL, 4, 0); TEST_STRTOLL("-011", 0, -9LL, 4, 0); TEST_STRTOLL("Text", 0, 0LL, 0, 0); TEST_STRTOLL("9223372036854775807", 10, 9223372036854775807LL, 19, 0); TEST_STRTOLL("9223372036854775807", 0, 9223372036854775807LL, 19, 0); TEST_STRTOLL("9223372036854775808", 0, 9223372036854775807LL, 19, ERANGE); TEST_STRTOLL("9223372036854775808", 10, 9223372036854775807LL, 19, ERANGE); TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LL, 18, 0); TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 18, 0); TEST_STRTOLL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 16, 0); TEST_STRTOLL("0x8000000000000000", 0, 9223372036854775807LL, 18, ERANGE); TEST_STRTOLL("0x8000000000000000", 16, 9223372036854775807LL, 18, ERANGE); TEST_STRTOLL("80000000000000000", 16, 9223372036854775807LL, 17, ERANGE); TEST_STRTOLL("0777777777777777777777", 0, 9223372036854775807LL, 22, 0); TEST_STRTOLL("0777777777777777777777", 8, 9223372036854775807LL, 22, 0); TEST_STRTOLL("777777777777777777777", 8, 9223372036854775807LL, 21, 0); TEST_STRTOLL("01000000000000000000000", 0, 9223372036854775807LL, 23, ERANGE); TEST_STRTOLL("01000000000000000000000", 8, 9223372036854775807LL, 23, ERANGE); TEST_STRTOLL("1000000000000000000000", 8, 9223372036854775807LL, 22, ERANGE); TEST_STRTOLL("-9223372036854775808", 10, -9223372036854775807LL -1, 20, 0); TEST_STRTOLL("-9223372036854775808", 0, -9223372036854775807LL -1, 20, 0); TEST_STRTOLL("-9223372036854775809", 0, -9223372036854775807LL -1, 20, ERANGE); TEST_STRTOLL("-9223372036854775809", 10, -9223372036854775807LL -1, 20, ERANGE); TEST_STRTOLL("-0x8000000000000000", 0, -9223372036854775807LL -1, 19, 0); TEST_STRTOLL("-0x8000000000000000", 16, -9223372036854775807LL -1, 19, 0); TEST_STRTOLL("-8000000000000000", 16, -9223372036854775807LL -1, 17, 0); TEST_STRTOLL("-0x8000000000000001", 0, -9223372036854775807LL -1, 19, ERANGE); TEST_STRTOLL("-0x8000000000000001", 16, -9223372036854775807LL -1, 19, ERANGE); TEST_STRTOLL("-80000000000000001", 16, -9223372036854775807LL -1, 18, ERANGE); TEST_STRTOLL("-01000000000000000000000",0, -9223372036854775807LL -1, 24, 0); TEST_STRTOLL("-01000000000000000000000",8, -9223372036854775807LL -1, 24, 0); TEST_STRTOLL("-1000000000000000000000", 8, -9223372036854775807LL -1, 23, 0); TEST_STRTOLL("-01000000000000000000001",0, -9223372036854775807LL -1, 24, ERANGE); TEST_STRTOLL("-01000000000000000000001",8, -9223372036854775807LL -1, 24, ERANGE); TEST_STRTOLL("-1000000000000000000001", 8, -9223372036854775807LL -1, 23, ERANGE); printf("success: strtoll\n"); return true; } static int test_strtoull(void) { printf("test: strtoull\n"); #define TEST_STRTOULL(str,base,res,diff,errnoo) TEST_STRTO_X(long long unsigned int,"%llu",strtoull,str,base,res,diff,errnoo) TEST_STRTOULL("15", 10, 15LLU, 2, 0); TEST_STRTOULL(" 15", 10, 15LLU, 4, 0); TEST_STRTOULL("15", 0, 15LLU, 2, 0); TEST_STRTOULL(" 15 ", 0, 15LLU, 3, 0); TEST_STRTOULL("+15", 10, 15LLU, 3, 0); TEST_STRTOULL(" +15", 10, 15LLU, 5, 0); TEST_STRTOULL("+15", 0, 15LLU, 3, 0); TEST_STRTOULL(" +15 ", 0, 15LLU, 4, 0); TEST_STRTOULL("-15", 10, 18446744073709551601LLU, 3, 0); TEST_STRTOULL(" 
-15", 10, 18446744073709551601LLU, 5, 0); TEST_STRTOULL("-15", 0, 18446744073709551601LLU, 3, 0); TEST_STRTOULL(" -15 ", 0, 18446744073709551601LLU, 4, 0); TEST_STRTOULL("015", 10, 15LLU, 3, 0); TEST_STRTOULL(" 015", 10, 15LLU, 5, 0); TEST_STRTOULL("015", 0, 13LLU, 3, 0); TEST_STRTOULL(" 015", 0, 13LLU, 5, 0); TEST_STRTOULL("0x15", 10, 0LLU, 1, 0); TEST_STRTOULL(" 0x15", 10, 0LLU, 3, 0); TEST_STRTOULL("0x15", 0, 21LLU, 4, 0); TEST_STRTOULL(" 0x15", 0, 21LLU, 6, 0); TEST_STRTOULL("10", 16, 16LLU, 2, 0); TEST_STRTOULL(" 10 ", 16, 16LLU, 4, 0); TEST_STRTOULL("0x10", 16, 16LLU, 4, 0); TEST_STRTOULL("0x10", 0, 16LLU, 4, 0); TEST_STRTOULL(" 0x10 ", 0, 16LLU, 5, 0); TEST_STRTOULL("+10", 16, 16LLU, 3, 0); TEST_STRTOULL(" +10 ", 16, 16LLU, 5, 0); TEST_STRTOULL("+0x10", 16, 16LLU, 5, 0); TEST_STRTOULL("+0x10", 0, 16LLU, 5, 0); TEST_STRTOULL(" +0x10 ", 0, 16LLU, 6, 0); TEST_STRTOULL("-10", 16, -16LLU, 3, 0); TEST_STRTOULL(" -10 ", 16, -16LLU, 5, 0); TEST_STRTOULL("-0x10", 16, -16LLU, 5, 0); TEST_STRTOULL("-0x10", 0, -16LLU, 5, 0); TEST_STRTOULL(" -0x10 ", 0, -16LLU, 6, 0); TEST_STRTOULL("010", 16, 16LLU, 3, 0); TEST_STRTOULL(" 010 ", 16, 16LLU, 5, 0); TEST_STRTOULL("-010", 16, -16LLU, 4, 0); TEST_STRTOULL("11", 8, 9LLU, 2, 0); TEST_STRTOULL("011", 8, 9LLU, 3, 0); TEST_STRTOULL("011", 0, 9LLU, 3, 0); TEST_STRTOULL("-11", 8, -9LLU, 3, 0); TEST_STRTOULL("-011", 8, -9LLU, 4, 0); TEST_STRTOULL("-011", 0, -9LLU, 4, 0); TEST_STRTOULL("011", 8, 9LLU, 3, 0); TEST_STRTOULL("011", 0, 9LLU, 3, 0); TEST_STRTOULL("-11", 8, -9LLU, 3, 0); TEST_STRTOULL("-011", 8, -9LLU, 4, 0); TEST_STRTOULL("-011", 0, -9LLU, 4, 0); TEST_STRTOULL("Text", 0, 0LLU, 0, 0); TEST_STRTOULL("9223372036854775807", 10, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("9223372036854775807", 0, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("9223372036854775808", 0, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("9223372036854775808", 10, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LLU, 18, 0); TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 18, 0); TEST_STRTOULL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 16, 0); TEST_STRTOULL("0x8000000000000000", 0, 9223372036854775808LLU, 18, 0); TEST_STRTOULL("0x8000000000000000", 16, 9223372036854775808LLU, 18, 0); TEST_STRTOULL("8000000000000000", 16, 9223372036854775808LLU, 16, 0); TEST_STRTOULL("0777777777777777777777", 0, 9223372036854775807LLU, 22, 0); TEST_STRTOULL("0777777777777777777777", 8, 9223372036854775807LLU, 22, 0); TEST_STRTOULL("777777777777777777777", 8, 9223372036854775807LLU, 21, 0); TEST_STRTOULL("01000000000000000000000",0, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("01000000000000000000000",8, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("1000000000000000000000", 8, 9223372036854775808LLU, 22, 0); TEST_STRTOULL("-9223372036854775808", 10, 9223372036854775808LLU, 20, 0); TEST_STRTOULL("-9223372036854775808", 0, 9223372036854775808LLU, 20, 0); TEST_STRTOULL("-9223372036854775809", 0, 9223372036854775807LLU, 20, 0); TEST_STRTOULL("-9223372036854775809", 10, 9223372036854775807LLU, 20, 0); TEST_STRTOULL("-0x8000000000000000", 0, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("-0x8000000000000000", 16, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("-8000000000000000", 16, 9223372036854775808LLU, 17, 0); TEST_STRTOULL("-0x8000000000000001", 0, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("-0x8000000000000001", 16, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("-8000000000000001", 16, 9223372036854775807LLU, 17, 0); 
TEST_STRTOULL("-01000000000000000000000",0, 9223372036854775808LLU, 24, 0); TEST_STRTOULL("-01000000000000000000000",8, 9223372036854775808LLU, 24, 0); TEST_STRTOULL("-1000000000000000000000",8, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("-01000000000000000000001",0, 9223372036854775807LLU, 24, 0); TEST_STRTOULL("-01000000000000000000001",8, 9223372036854775807LLU, 24, 0); TEST_STRTOULL("-1000000000000000000001",8, 9223372036854775807LLU, 23, 0); TEST_STRTOULL("18446744073709551615", 0, 18446744073709551615LLU, 20, 0); TEST_STRTOULL("18446744073709551615", 10, 18446744073709551615LLU, 20, 0); TEST_STRTOULL("18446744073709551616", 0, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("18446744073709551616", 10, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 0, 18446744073709551615LLU, 18, 0); TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 18, 0); TEST_STRTOULL("FFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 16, 0); TEST_STRTOULL("0x10000000000000000", 0, 18446744073709551615LLU, 19, ERANGE); TEST_STRTOULL("0x10000000000000000", 16, 18446744073709551615LLU, 19, ERANGE); TEST_STRTOULL("10000000000000000", 16, 18446744073709551615LLU, 17, ERANGE); TEST_STRTOULL("01777777777777777777777",0, 18446744073709551615LLU, 23, 0); TEST_STRTOULL("01777777777777777777777",8, 18446744073709551615LLU, 23, 0); TEST_STRTOULL("1777777777777777777777", 8, 18446744073709551615LLU, 22, 0); TEST_STRTOULL("02000000000000000000000",0, 18446744073709551615LLU, 23, ERANGE); TEST_STRTOULL("02000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); TEST_STRTOULL("2000000000000000000000", 8, 18446744073709551615LLU, 22, ERANGE); TEST_STRTOULL("-18446744073709551615", 0, 1LLU, 21, 0); TEST_STRTOULL("-18446744073709551615", 10, 1LLU, 21, 0); TEST_STRTOULL("-18446744073709551616", 0, 18446744073709551615LLU, 21, ERANGE); TEST_STRTOULL("-18446744073709551616", 10, 18446744073709551615LLU, 21, ERANGE); TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 0, 1LLU, 19, 0); TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 16, 1LLU, 19, 0); TEST_STRTOULL("-FFFFFFFFFFFFFFFF", 16, 1LLU, 17, 0); TEST_STRTOULL("-0x10000000000000000", 0, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("-0x10000000000000000", 16, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("-10000000000000000", 16, 18446744073709551615LLU, 18, ERANGE); TEST_STRTOULL("-01777777777777777777777",0, 1LLU, 24, 0); TEST_STRTOULL("-01777777777777777777777",8, 1LLU, 24, 0); TEST_STRTOULL("-1777777777777777777777",8, 1LLU, 23, 0); TEST_STRTOULL("-02000000000000000000000",0, 18446744073709551615LLU, 24, ERANGE); TEST_STRTOULL("-02000000000000000000000",8, 18446744073709551615LLU, 24, ERANGE); TEST_STRTOULL("-2000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); printf("success: strtoull\n"); return true; } /* FIXME: Types: bool socklen_t uint{8,16,32,64}_t int{8,16,32,64}_t intptr_t Constants: PATH_NAME_MAX UINT{16,32,64}_MAX INT32_MAX */ static int test_va_copy(void) { /* FIXME */ return true; } static int test_FUNCTION(void) { printf("test: FUNCTION\n"); if (strcmp(__FUNCTION__, "test_FUNCTION") != 0) { printf("failure: FUNCTION [\nFUNCTION invalid\n]\n"); return false; } printf("success: FUNCTION\n"); return true; } static int test_MIN(void) { printf("test: MIN\n"); if (MIN(20, 1) != 1) { printf("failure: MIN [\nMIN invalid\n]\n"); return false; } if (MIN(1, 20) != 1) { printf("failure: MIN [\nMIN invalid\n]\n"); return false; } printf("success: MIN\n"); return true; } static int test_MAX(void) { printf("test: MAX\n"); if 
(MAX(20, 1) != 20) { printf("failure: MAX [\nMAX invalid\n]\n"); return false; } if (MAX(1, 20) != 20) { printf("failure: MAX [\nMAX invalid\n]\n"); return false; } printf("success: MAX\n"); return true; } static int test_socketpair(void) { int sock[2]; char buf[20]; printf("test: socketpair\n"); if (socketpair(AF_UNIX, SOCK_STREAM, 0, sock) == -1) { printf("failure: socketpair [\n" "socketpair() failed\n" "]\n"); return false; } if (write(sock[1], "automatisch", 12) == -1) { printf("failure: socketpair [\n" "write() failed: %s\n" "]\n", strerror(errno)); return false; } if (read(sock[0], buf, 12) == -1) { printf("failure: socketpair [\n" "read() failed: %s\n" "]\n", strerror(errno)); return false; } if (strcmp(buf, "automatisch") != 0) { printf("failure: socketpair [\n" "expected: automatisch, got: %s\n" "]\n", buf); return false; } printf("success: socketpair\n"); return true; } extern int libreplace_test_strptime(void); static int test_strptime(void) { return libreplace_test_strptime(); } extern int getifaddrs_test(void); static int test_getifaddrs(void) { printf("test: getifaddrs\n"); if (getifaddrs_test() != 0) { printf("failure: getifaddrs\n"); return false; } printf("success: getifaddrs\n"); return true; } static int test_utime(void) { struct utimbuf u; struct stat st1, st2, st3; int fd; printf("test: utime\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: utime [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (fstat(fd, &st1) != 0) { printf("failure: utime [\n" "fstat (1) failed - %s\n]\n", strerror(errno)); close(fd); return false; } u.actime = st1.st_atime + 300; u.modtime = st1.st_mtime - 300; if (utime(TESTFILE, &u) != 0) { printf("failure: utime [\n" "utime(&u) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st2) != 0) { printf("failure: utime [\n" "fstat (2) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (utime(TESTFILE, NULL) != 0) { printf("failure: utime [\n" "utime(NULL) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st3) != 0) { printf("failure: utime [\n" "fstat (3) failed - %s\n]\n", strerror(errno)); close(fd); return false; } #define CMP_VAL(a,c,b) do { \ if (a c b) { \ printf("failure: utime [\n" \ "%s: %s(%d) %s %s(%d)\n]\n", \ __location__, \ #a, (int)a, #c, #b, (int)b); \ close(fd); \ return false; \ } \ } while(0) #define EQUAL_VAL(a,b) CMP_VAL(a,!=,b) #define GREATER_VAL(a,b) CMP_VAL(a,<=,b) #define LESSER_VAL(a,b) CMP_VAL(a,>=,b) EQUAL_VAL(st2.st_atime, st1.st_atime + 300); EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); LESSER_VAL(st3.st_atime, st2.st_atime); GREATER_VAL(st3.st_mtime, st2.st_mtime); #undef CMP_VAL #undef EQUAL_VAL #undef GREATER_VAL #undef LESSER_VAL unlink(TESTFILE); printf("success: utime\n"); close(fd); return true; } static int test_utimes(void) { struct timeval tv[2]; struct stat st1, st2; int fd; printf("test: utimes\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: utimes [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (fstat(fd, &st1) != 0) { printf("failure: utimes [\n" "fstat (1) failed - %s\n]\n", strerror(errno)); close(fd); return false; } ZERO_STRUCT(tv); tv[0].tv_sec = st1.st_atime + 300; tv[1].tv_sec = st1.st_mtime - 300; if (utimes(TESTFILE, tv) != 0) { printf("failure: utimes [\n" "utimes(tv) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st2) 
!= 0) { printf("failure: utimes [\n" "fstat (2) failed - %s\n]\n", strerror(errno)); close(fd); return false; } #define EQUAL_VAL(a,b) do { \ if (a != b) { \ printf("failure: utimes [\n" \ "%s: %s(%d) != %s(%d)\n]\n", \ __location__, \ #a, (int)a, #b, (int)b); \ close(fd); \ return false; \ } \ } while(0) EQUAL_VAL(st2.st_atime, st1.st_atime + 300); EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); #undef EQUAL_VAL unlink(TESTFILE); printf("success: utimes\n"); close(fd); return true; } static int test_memmem(void) { char *s; printf("test: memmem\n"); s = (char *)memmem("foo", 3, "fo", 2); if (strcmp(s, "foo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foo", 3, "", 0); /* it is allowable for this to return NULL (as happens on FreeBSD) */ if (s && strcmp(s, "foo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foo", 4, "o", 1); if (strcmp(s, "oo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foobarfodx", 11, "fod", 3); if (strcmp(s, "fodx") != 0) { printf(__location__ ": Failed memmem\n"); return false; } printf("success: memmem\n"); return true; } static bool test_closefrom(void) { int i, fd; for (i=0; i<100; i++) { fd = dup(0); if (fd == -1) { perror("dup failed"); closefrom(3); return false; } /* 1000 is just an arbitrarily chosen upper bound */ if (fd >= 1000) { printf("fd=%d\n", fd); closefrom(3); return false; } } closefrom(3); for (i=3; i<=fd; i++) { off_t off; off = lseek(i, 0, SEEK_CUR); if ((off != (off_t)-1) || (errno != EBADF)) { printf("fd %d not closed\n", i); return false; } } return true; } static bool test_array_del_element(void) { int a[] = { 1,2,3,4,5 }; printf("test: array_del_element\n"); ARRAY_DEL_ELEMENT(a, 4, ARRAY_SIZE(a)); if ((a[0] != 1) || (a[1] != 2) || (a[2] != 3) || (a[3] != 4) || (a[4] != 5)) { return false; } ARRAY_DEL_ELEMENT(a, 0, ARRAY_SIZE(a)); if ((a[0] != 2) || (a[1] != 3) || (a[2] != 4) || (a[3] != 5) || (a[4] != 5)) { return false; } ARRAY_DEL_ELEMENT(a, 2, ARRAY_SIZE(a)); if ((a[0] != 2) || (a[1] != 3) || (a[2] != 5) || (a[3] != 5) || (a[4] != 5)) { return false; } printf("success: array_del_element\n"); return true; } bool torture_local_replace(struct torture_context *ctx) { bool ret = true; ret &= test_ftruncate(); ret &= test_strlcpy(); ret &= test_strlcat(); ret &= test_mktime(); ret &= test_initgroups(); ret &= test_memmove(); ret &= test_strdup(); ret &= test_setlinebuf(); ret &= test_vsyslog(); ret &= test_timegm(); ret &= test_setenv(); ret &= test_strndup(); ret &= test_strnlen(); ret &= test_waitpid(); ret &= test_seteuid(); ret &= test_setegid(); ret &= test_asprintf(); ret &= test_snprintf(); ret &= test_vasprintf(); ret &= test_vsnprintf(); ret &= test_opendir(); ret &= test_readdir(); ret &= test_telldir(); ret &= test_seekdir(); ret &= test_dlopen(); ret &= test_chroot(); ret &= test_bzero(); ret &= test_strerror(); ret &= test_errno(); ret &= test_mkdtemp(); ret &= test_mkstemp(); ret &= test_pread(); ret &= test_pwrite(); ret &= test_inet_ntoa(); ret &= test_strtoll(); ret &= test_strtoull(); ret &= test_va_copy(); ret &= test_FUNCTION(); ret &= test_MIN(); ret &= test_MAX(); ret &= test_socketpair(); ret &= test_strptime(); ret &= test_getifaddrs(); ret &= test_utime(); ret &= test_utimes(); ret &= test_memmem(); ret &= test_closefrom(); ret &= test_array_del_element(); return ret; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 
tevent-0.11.0/lib/replace/timegm.c0000660000000000000000000000476400000000000016655 0ustar00rootroot00000000000000/* * Copyright (c) 1997 Kungliga Tekniska Högskolan * (Royal Institute of Technology, Stockholm, Sweden). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* adapted for Samba4 by Andrew Tridgell */ #include "replace.h" #include "system/time.h" static int is_leap(unsigned y) { y += 1900; return (y % 4) == 0 && ((y % 100) != 0 || (y % 400) == 0); } time_t rep_timegm(struct tm *tm) { static const unsigned ndays[2][12] ={ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; time_t res = 0; unsigned i; if (tm->tm_mon > 12 || tm->tm_mon < 0 || tm->tm_mday > 31 || tm->tm_min > 60 || tm->tm_sec > 60 || tm->tm_hour > 24) { /* invalid tm structure */ return 0; } for (i = 70; i < tm->tm_year; ++i) res += is_leap(i) ? 366 : 365; for (i = 0; i < tm->tm_mon; ++i) res += ndays[is_leap(tm->tm_year)][i]; res += tm->tm_mday - 1; res *= 24; res += tm->tm_hour; res *= 60; res += tm->tm_min; res *= 60; res += tm->tm_sec; return res; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/replace/win32_replace.h0000660000000000000000000001046400000000000020027 0ustar00rootroot00000000000000#ifndef _WIN32_REPLACE_H #define _WIN32_REPLACE_H #ifdef HAVE_WINSOCK2_H #include #endif #ifdef HAVE_WS2TCPIP_H #include #endif #ifdef HAVE_WINDOWS_H #include #endif /* Map BSD Socket errorcodes to the WSA errorcodes (if possible) */ #define EAFNOSUPPORT WSAEAFNOSUPPORT #define ECONNREFUSED WSAECONNREFUSED #define EINPROGRESS WSAEINPROGRESS #define EMSGSIZE WSAEMSGSIZE #define ENOBUFS WSAENOBUFS #define ENOTSOCK WSAENOTSOCK #define ENETUNREACH WSAENETUNREACH #define ENOPROTOOPT WSAENOPROTOOPT #define ENOTCONN WSAENOTCONN #define ENOTSUP 134 /* We undefine the following constants due to conflicts with the w32api headers * and the Windows Platform SDK/DDK. 
*/ #undef interface #undef ERROR_INVALID_PARAMETER #undef ERROR_INSUFFICIENT_BUFFER #undef ERROR_INVALID_DATATYPE #undef FILE_GENERIC_READ #undef FILE_GENERIC_WRITE #undef FILE_GENERIC_EXECUTE #undef FILE_ATTRIBUTE_READONLY #undef FILE_ATTRIBUTE_HIDDEN #undef FILE_ATTRIBUTE_SYSTEM #undef FILE_ATTRIBUTE_DIRECTORY #undef FILE_ATTRIBUTE_ARCHIVE #undef FILE_ATTRIBUTE_DEVICE #undef FILE_ATTRIBUTE_NORMAL #undef FILE_ATTRIBUTE_TEMPORARY #undef FILE_ATTRIBUTE_REPARSE_POINT #undef FILE_ATTRIBUTE_COMPRESSED #undef FILE_ATTRIBUTE_OFFLINE #undef FILE_ATTRIBUTE_ENCRYPTED #undef FILE_FLAG_WRITE_THROUGH #undef FILE_FLAG_NO_BUFFERING #undef FILE_FLAG_RANDOM_ACCESS #undef FILE_FLAG_SEQUENTIAL_SCAN #undef FILE_FLAG_DELETE_ON_CLOSE #undef FILE_FLAG_BACKUP_SEMANTICS #undef FILE_FLAG_POSIX_SEMANTICS #undef FILE_TYPE_DISK #undef FILE_TYPE_UNKNOWN #undef FILE_CASE_SENSITIVE_SEARCH #undef FILE_CASE_PRESERVED_NAMES #undef FILE_UNICODE_ON_DISK #undef FILE_PERSISTENT_ACLS #undef FILE_FILE_COMPRESSION #undef FILE_VOLUME_QUOTAS #undef FILE_VOLUME_IS_COMPRESSED #undef FILE_NOTIFY_CHANGE_FILE_NAME #undef FILE_NOTIFY_CHANGE_DIR_NAME #undef FILE_NOTIFY_CHANGE_ATTRIBUTES #undef FILE_NOTIFY_CHANGE_SIZE #undef FILE_NOTIFY_CHANGE_LAST_WRITE #undef FILE_NOTIFY_CHANGE_LAST_ACCESS #undef FILE_NOTIFY_CHANGE_CREATION #undef FILE_NOTIFY_CHANGE_EA #undef FILE_NOTIFY_CHANGE_SECURITY #undef FILE_NOTIFY_CHANGE_STREAM_NAME #undef FILE_NOTIFY_CHANGE_STREAM_SIZE #undef FILE_NOTIFY_CHANGE_STREAM_WRITE #undef FILE_NOTIFY_CHANGE_NAME #undef PRINTER_ATTRIBUTE_QUEUED #undef PRINTER_ATTRIBUTE_DIRECT #undef PRINTER_ATTRIBUTE_DEFAULT #undef PRINTER_ATTRIBUTE_SHARED #undef PRINTER_ATTRIBUTE_NETWORK #undef PRINTER_ATTRIBUTE_HIDDEN #undef PRINTER_ATTRIBUTE_LOCAL #undef PRINTER_ATTRIBUTE_ENABLE_DEVQ #undef PRINTER_ATTRIBUTE_KEEPPRINTEDJOBS #undef PRINTER_ATTRIBUTE_DO_COMPLETE_FIRST #undef PRINTER_ATTRIBUTE_WORK_OFFLINE #undef PRINTER_ATTRIBUTE_ENABLE_BIDI #undef PRINTER_ATTRIBUTE_RAW_ONLY #undef PRINTER_ATTRIBUTE_PUBLISHED #undef PRINTER_ENUM_DEFAULT #undef PRINTER_ENUM_LOCAL #undef PRINTER_ENUM_CONNECTIONS #undef PRINTER_ENUM_FAVORITE #undef PRINTER_ENUM_NAME #undef PRINTER_ENUM_REMOTE #undef PRINTER_ENUM_SHARED #undef PRINTER_ENUM_NETWORK #undef PRINTER_ENUM_EXPAND #undef PRINTER_ENUM_CONTAINER #undef PRINTER_ENUM_ICON1 #undef PRINTER_ENUM_ICON2 #undef PRINTER_ENUM_ICON3 #undef PRINTER_ENUM_ICON4 #undef PRINTER_ENUM_ICON5 #undef PRINTER_ENUM_ICON6 #undef PRINTER_ENUM_ICON7 #undef PRINTER_ENUM_ICON8 #undef PRINTER_STATUS_PAUSED #undef PRINTER_STATUS_ERROR #undef PRINTER_STATUS_PENDING_DELETION #undef PRINTER_STATUS_PAPER_JAM #undef PRINTER_STATUS_PAPER_OUT #undef PRINTER_STATUS_MANUAL_FEED #undef PRINTER_STATUS_PAPER_PROBLEM #undef PRINTER_STATUS_OFFLINE #undef PRINTER_STATUS_IO_ACTIVE #undef PRINTER_STATUS_BUSY #undef PRINTER_STATUS_PRINTING #undef PRINTER_STATUS_OUTPUT_BIN_FULL #undef PRINTER_STATUS_NOT_AVAILABLE #undef PRINTER_STATUS_WAITING #undef PRINTER_STATUS_PROCESSING #undef PRINTER_STATUS_INITIALIZING #undef PRINTER_STATUS_WARMING_UP #undef PRINTER_STATUS_TONER_LOW #undef PRINTER_STATUS_NO_TONER #undef PRINTER_STATUS_PAGE_PUNT #undef PRINTER_STATUS_USER_INTERVENTION #undef PRINTER_STATUS_OUT_OF_MEMORY #undef PRINTER_STATUS_DOOR_OPEN #undef PRINTER_STATUS_SERVER_UNKNOWN #undef PRINTER_STATUS_POWER_SAVE #undef DWORD #undef HKEY_CLASSES_ROOT #undef HKEY_CURRENT_USER #undef HKEY_LOCAL_MACHINE #undef HKEY_USERS #undef HKEY_PERFORMANCE_DATA #undef HKEY_CURRENT_CONFIG #undef HKEY_DYN_DATA #undef REG_DWORD #undef REG_QWORD #undef SERVICE_STATE_ALL 
#undef SE_GROUP_MANDATORY #undef SE_GROUP_ENABLED_BY_DEFAULT #undef SE_GROUP_ENABLED #endif /* _WIN32_REPLACE_H */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/replace/wscript0000660000000000000000000012247000000000000016640 0ustar00rootroot00000000000000#!/usr/bin/env python APPNAME = 'libreplace' VERSION = '1.2.1' import sys import os # find the buildtools directory top = '.' while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: top = top + '/..' sys.path.insert(0, top + '/buildtools/wafsamba') out = 'bin' import wafsamba from wafsamba import samba_dist from waflib import Options, Utils, Logs, Context samba_dist.DIST_DIRS('lib/replace buildtools:buildtools third_party/waf:third_party/waf') def options(opt): opt.BUILTIN_DEFAULT('NONE') opt.PRIVATE_EXTENSION_DEFAULT('') opt.RECURSE('buildtools/wafsamba') @Utils.run_once def configure(conf): conf.RECURSE('buildtools/wafsamba') conf.env.standalone_replace = conf.IN_LAUNCH_DIR() conf.DEFINE('BOOL_DEFINED', 1) conf.DEFINE('HAVE_LIBREPLACE', 1) conf.DEFINE('LIBREPLACE_NETWORK_CHECKS', 1) conf.CHECK_HEADERS('linux/types.h crypt.h locale.h acl/libacl.h compat.h') conf.CHECK_HEADERS('acl/libacl.h attr/xattr.h compat.h ctype.h dustat.h') conf.CHECK_HEADERS('fcntl.h fnmatch.h glob.h history.h krb5.h langinfo.h') conf.CHECK_HEADERS('locale.h ndir.h pwd.h') conf.CHECK_HEADERS('shadow.h sys/acl.h') conf.CHECK_HEADERS('sys/attributes.h attr/attributes.h sys/capability.h sys/dir.h sys/epoll.h') conf.CHECK_HEADERS('port.h') conf.CHECK_HEADERS('sys/fcntl.h sys/filio.h sys/filsys.h sys/fs/s5param.h') conf.CHECK_HEADERS('sys/id.h sys/ioctl.h sys/ipc.h sys/mman.h sys/mode.h sys/ndir.h sys/priv.h') conf.CHECK_HEADERS('sys/resource.h sys/security.h sys/shm.h sys/statfs.h sys/statvfs.h sys/termio.h') conf.CHECK_HEADERS('sys/vfs.h sys/xattr.h termio.h termios.h sys/file.h') conf.CHECK_HEADERS('sys/ucontext.h sys/wait.h sys/stat.h') if not conf.CHECK_DECLS('malloc', headers='stdlib.h'): conf.CHECK_HEADERS('malloc.h') conf.CHECK_HEADERS('grp.h') conf.CHECK_HEADERS('sys/select.h setjmp.h utime.h sys/syslog.h syslog.h') conf.CHECK_HEADERS('stdarg.h vararg.h sys/mount.h mntent.h') conf.CHECK_HEADERS('stropts.h unix.h string.h strings.h sys/param.h limits.h') conf.CHECK_HEADERS('''sys/socket.h netinet/in.h netdb.h arpa/inet.h netinet/in_systm.h netinet/ip.h netinet/tcp.h netinet/in_ip.h sys/sockio.h sys/un.h''', together=True) conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h') conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h') conf.CHECK_HEADERS('errno.h') conf.CHECK_HEADERS('getopt.h iconv.h') conf.CHECK_HEADERS('memory.h nss.h sasl/sasl.h') conf.CHECK_FUNCS_IN('inotify_init', 'inotify', checklibc=True, headers='sys/inotify.h') conf.CHECK_HEADERS('security/pam_appl.h zlib.h asm/unistd.h') conf.CHECK_HEADERS('sys/unistd.h alloca.h float.h') conf.SET_TARGET_TYPE('tirpc', 'EMPTY') if conf.CHECK_CODE( '\n#ifndef _TIRPC_RPC_H\n#error "no tirpc headers in system path"\n#endif\n', 'HAVE_RPC_RPC_HEADERS', headers=['rpc/rpc.h', 'rpc/nettype.h'], msg='Checking for tirpc rpc headers in default system path'): if conf.CONFIG_SET('HAVE_RPC_RPC_H'): conf.undefine('HAVE_RPC_RPC_H') if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): if conf.CHECK_CFG(package='libtirpc', args='--cflags --libs', msg='Checking for libtirpc headers', uselib_store='TIRPC'): conf.CHECK_HEADERS('rpc/rpc.h rpc/nettype.h', lib='tirpc', together=True) conf.SET_TARGET_TYPE('tirpc', 'SYSLIB') if not 
conf.CONFIG_SET('HAVE_RPC_RPC_H'): if conf.CHECK_CFG(package='libntirpc', args='--cflags', msg='Checking for libntirpc headers', uselib_store='TIRPC'): conf.CHECK_HEADERS('rpc/rpc.h rpc/nettype.h', lib='tirpc', together=True) conf.SET_TARGET_TYPE('tirpc', 'SYSLIB') if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): Logs.warn('No rpc/rpc.h header found, tirpc or libntirpc missing?') # This file is deprecated with glibc >= 2.30 so we need to check if it # includes a deprecation warning: # #warning "The <sys/sysctl.h> header is deprecated and will be removed." conf.CHECK_CODE(''' #include <sys/sysctl.h> int main(void) { return 0; } ''', define='HAVE_SYS_SYSCTL_H', cflags=['-Werror=cpp'], addmain=False, msg='Checking for header sys/sysctl.h') conf.CHECK_HEADERS('sys/fileio.h sys/filesys.h sys/dustat.h sys/sysmacros.h') conf.CHECK_HEADERS('xfs/libxfs.h netgroup.h') conf.CHECK_HEADERS('valgrind.h valgrind/valgrind.h') conf.CHECK_HEADERS('valgrind/memcheck.h valgrind/helgrind.h') conf.CHECK_HEADERS('nss_common.h nsswitch.h ns_api.h') conf.CHECK_HEADERS('sys/extattr.h sys/ea.h sys/proplist.h sys/cdefs.h') conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h') conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h') conf.CHECK_HEADERS('sys/atomic.h stdatomic.h') conf.CHECK_HEADERS('libgen.h') if conf.CHECK_CFLAGS('-Wno-format-truncation'): conf.define('HAVE_WNO_FORMAT_TRUNCATION', '1') if conf.CHECK_CFLAGS('-Wno-unused-function'): conf.define('HAVE_WNO_UNUSED_FUNCTION', '1') if conf.CHECK_CFLAGS('-Wno-strict-overflow'): conf.define('HAVE_WNO_STRICT_OVERFLOW', '1') # Check for process set name support conf.CHECK_CODE(''' #include <sys/prctl.h> int main(void) { prctl(0); return 0; } ''', 'HAVE_PRCTL', addmain=False, headers='sys/prctl.h', msg='Checking for prctl syscall') conf.CHECK_CODE(''' #include <unistd.h> #ifdef HAVE_FCNTL_H #include <fcntl.h> #endif int main(void) { int fd = open("/dev/null", O_DIRECT); } ''', define='HAVE_OPEN_O_DIRECT', addmain=False, msg='Checking for O_DIRECT flag to open(2)') conf.CHECK_TYPES('"long long" intptr_t uintptr_t ptrdiff_t comparison_fn_t') conf.CHECK_TYPE('_Bool', define='HAVE__Bool') conf.CHECK_TYPE('bool', define='HAVE_BOOL') conf.CHECK_TYPE('int8_t', 'char') conf.CHECK_TYPE('uint8_t', 'unsigned char') conf.CHECK_TYPE('int16_t', 'short') conf.CHECK_TYPE('uint16_t', 'unsigned short') conf.CHECK_TYPE('int32_t', 'int') conf.CHECK_TYPE('uint32_t', 'unsigned') conf.CHECK_TYPE('int64_t', 'long long') conf.CHECK_TYPE('uint64_t', 'unsigned long long') conf.CHECK_TYPE('size_t', 'unsigned int') conf.CHECK_TYPE('ssize_t', 'int') conf.CHECK_TYPE('ino_t', 'unsigned') conf.CHECK_TYPE('loff_t', 'off_t') conf.CHECK_TYPE('offset_t', 'loff_t') conf.CHECK_TYPE('volatile int', define='HAVE_VOLATILE') conf.CHECK_TYPE('uint_t', 'unsigned int') conf.CHECK_TYPE('blksize_t', 'long', headers='sys/types.h sys/stat.h unistd.h') conf.CHECK_TYPE('blkcnt_t', 'long', headers='sys/types.h sys/stat.h unistd.h') conf.CHECK_SIZEOF('bool char int "long long" long short size_t ssize_t') conf.CHECK_SIZEOF('int8_t uint8_t int16_t uint16_t int32_t uint32_t int64_t uint64_t') conf.CHECK_SIZEOF('void*', define='SIZEOF_VOID_P') conf.CHECK_SIZEOF('off_t dev_t ino_t time_t') conf.CHECK_TYPES('socklen_t', headers='sys/socket.h') conf.CHECK_TYPE_IN('struct ifaddrs', 'ifaddrs.h') conf.CHECK_TYPE_IN('struct addrinfo', 'netdb.h') conf.CHECK_TYPE_IN('struct sockaddr', 'sys/socket.h') conf.CHECK_CODE('struct sockaddr_in6 x', define='HAVE_STRUCT_SOCKADDR_IN6', headers='sys/socket.h netdb.h netinet/in.h') conf.CHECK_TYPE_IN('struct sockaddr_storage', 'sys/socket.h') 
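# Note on the type probes above (behaviour supplied by the wafsamba helpers,
# not defined in this file): CHECK_TYPE('int64_t', 'long long') defines
# HAVE_INT64_T when the type compiles and otherwise arranges for config.h to
# carry "#define int64_t long long", so the fixed-width names stay usable
# either way; CHECK_TYPE_IN is the same probe restricted to the named header.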
conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h') conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE') conf.CHECK_FUNCS('sigsetmask siggetmask sigprocmask sigblock sigaction sigset') # Those functions are normally available in libc if not conf.CHECK_FUNCS(''' inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''', headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h'): conf.CHECK_FUNCS_IN(''' inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''', 'socket nsl', headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h') conf.DEFINE('REPLACE_REQUIRES_LIBSOCKET_LIBNSL', 1) conf.CHECK_FUNCS('memset_s memset_explicit') conf.CHECK_CODE(''' #include int main(void) { char buf[] = "This is some content"; memset(buf, '\0', sizeof(buf)); __asm__ volatile("" : : "g"(&buf) : "memory"); return 0; } ''', define='HAVE_GCC_VOLATILE_MEMORY_PROTECTION', addmain=False, msg='Checking for volatile memory protection', local_include=False) # Some old Linux systems have broken header files and # miss the IPV6_V6ONLY define in netinet/in.h, # but have it in linux/in6.h. # We can't include both files so we just check if the value # if defined and do the replacement in system/network.h if not conf.CHECK_VARIABLE('IPV6_V6ONLY', headers='sys/socket.h netdb.h netinet/in.h'): conf.CHECK_CODE(''' #include #if (IPV6_V6ONLY != 26) #error no IPV6_V6ONLY support on linux #endif int main(void) { return IPV6_V6ONLY; } ''', define='HAVE_LINUX_IPV6_V6ONLY_26', addmain=False, msg='Checking for IPV6_V6ONLY in linux/in6.h', local_include=False) conf.CHECK_CODE(''' struct sockaddr_storage sa_store; struct addrinfo *ai = NULL; struct in6_addr in6addr; int idx = if_nametoindex("iface1"); int s = socket(AF_INET6, SOCK_STREAM, 0); int ret = getaddrinfo(NULL, NULL, NULL, &ai); if (ret != 0) { const char *es = gai_strerror(ret); } freeaddrinfo(ai); { int val = 1; #ifdef HAVE_LINUX_IPV6_V6ONLY_26 #define IPV6_V6ONLY 26 #endif ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (const void *)&val, sizeof(val)); } ''', define='HAVE_IPV6', lib='nsl socket', headers='sys/socket.h netdb.h netinet/in.h net/if.h') if conf.CONFIG_SET('HAVE_SYS_UCONTEXT_H') and conf.CONFIG_SET('HAVE_SIGNAL_H'): conf.CHECK_CODE(''' ucontext_t uc; sigaddset(&uc.uc_sigmask, SIGUSR1); ''', 'HAVE_UCONTEXT_T', msg="Checking whether we have ucontext_t", headers='signal.h sys/ucontext.h') # Check for atomic builtins. */ conf.CHECK_CODE(''' int i; (void)__sync_fetch_and_add(&i, 1); ''', 'HAVE___SYNC_FETCH_AND_ADD', msg='Checking for __sync_fetch_and_add compiler builtin') conf.CHECK_CODE(''' int32_t i; atomic_add_32(&i, 1); ''', 'HAVE_ATOMIC_ADD_32', headers='stdint.h sys/atomic.h', msg='Checking for atomic_add_32 compiler builtin') # Check for thread fence. */ tf = conf.CHECK_CODE('atomic_thread_fence(memory_order_seq_cst);', 'HAVE_ATOMIC_THREAD_FENCE', headers='stdatomic.h', msg='Checking for atomic_thread_fence(memory_order_seq_cst) in stdatomic.h') if not tf: tf = conf.CHECK_CODE('__atomic_thread_fence(__ATOMIC_SEQ_CST);', 'HAVE___ATOMIC_THREAD_FENCE', msg='Checking for __atomic_thread_fence(__ATOMIC_SEQ_CST)') if not tf: # __sync_synchronize() is available since 2005 in gcc. 
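# Taken together these three probes form a fallback ladder: C11
# atomic_thread_fence(memory_order_seq_cst), then GCC's
# __atomic_thread_fence(__ATOMIC_SEQ_CST), then __sync_synchronize(); if any
# of them builds, HAVE_ATOMIC_THREAD_FENCE_SUPPORT is defined just below so
# at least one full memory barrier is known to be available.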
tf = conf.CHECK_CODE('__sync_synchronize();', 'HAVE___SYNC_SYNCHRONIZE', msg='Checking for __sync_synchronize') if tf: conf.DEFINE('HAVE_ATOMIC_THREAD_FENCE_SUPPORT', 1) conf.CHECK_CODE(''' #define FALL_THROUGH __attribute__((fallthrough)) enum direction_e { UP = 0, DOWN, }; int main(void) { enum direction_e key = UP; int i = 10; int j = 0; switch (key) { case UP: i = 5; FALL_THROUGH; case DOWN: j = i * 2; break; default: break; } if (j < i) { return 1; } return 0; } ''', 'HAVE_FALLTHROUGH_ATTRIBUTE', addmain=False, strict=True, cflags=['-Werror=missing-declarations'], msg='Checking for fallthrough attribute') # these may be builtins, so we need the link=False strategy conf.CHECK_FUNCS('strdup memmem printf memset memcpy memmove strcpy strncpy bzero', link=False) # See https://bugzilla.samba.org/show_bug.cgi?id=1097 # # Ported in from autoconf where it was added with this commit: # commit 804cfb20a067b4b687089dc72a8271b3abf20f31 # Author: Simo Sorce # Date: Wed Aug 25 14:24:16 2004 +0000 # r2070: Let's try to overload srnlen and strndup for AIX where they are natly broken. host_os = sys.platform if host_os.rfind('aix') > -1: conf.DEFINE('BROKEN_STRNLEN', 1) conf.DEFINE('BROKEN_STRNDUP', 1) conf.CHECK_FUNCS('shl_load shl_unload shl_findsym') conf.CHECK_FUNCS('pipe strftime srandom random srand rand usleep setbuffer') conf.CHECK_FUNCS('lstat getpgrp utime utimes setuid seteuid setreuid setresuid setgid setegid') conf.CHECK_FUNCS('setregid setresgid chroot strerror vsyslog setlinebuf mktime') conf.CHECK_FUNCS('ftruncate chsize rename waitpid wait4') conf.CHECK_FUNCS('initgroups pread pwrite strndup strcasestr strsep') conf.CHECK_FUNCS('strtok_r mkdtemp dup2 dprintf vdprintf isatty chown lchown') conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf') conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull') conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq memalign posix_memalign') conf.CHECK_FUNCS('fmemopen') if conf.CONFIG_SET('HAVE_MEMALIGN'): conf.CHECK_DECLS('memalign', headers='malloc.h') # glibc up to 2.3.6 had dangerously broken posix_fallocate(). DON'T USE IT. if conf.CHECK_CODE(''' #define _XOPEN_SOURCE 600 #include #if defined(__GLIBC__) && ((__GLIBC__ < 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 4)) #error probably broken posix_fallocate #endif ''', '_POSIX_FALLOCATE_CAPABLE_LIBC', msg='Checking for posix_fallocate-capable libc'): conf.CHECK_FUNCS('posix_fallocate') conf.CHECK_FUNCS('prctl dirname basename') strlcpy_in_bsd = False # libbsd on some platforms provides strlcpy and strlcat if not conf.CHECK_FUNCS('strlcpy strlcat'): if conf.CHECK_FUNCS_IN('strlcpy strlcat', 'bsd', headers='bsd/string.h', checklibc=True): strlcpy_in_bsd = True if not conf.CHECK_FUNCS('getpeereid'): conf.CHECK_FUNCS_IN('getpeereid', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS_IN('setproctitle', 'setproctitle', headers='setproctitle.h'): conf.CHECK_FUNCS_IN('setproctitle', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS('setproctitle_init'): conf.CHECK_FUNCS_IN('setproctitle_init', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS('closefrom'): conf.CHECK_FUNCS_IN('closefrom', 'bsd', headers='bsd/unistd.h') conf.CHECK_CODE(''' struct ucred cred; socklen_t cred_len; int ret = getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, &cred_len);''', 'HAVE_PEERCRED', msg="Checking whether we can use SO_PEERCRED to get socket credentials", headers='sys/types.h sys/socket.h') #Some OS (ie. 
freebsd) return EINVAL if the conversion could not be done, it's not what we expect #Let's detect those cases if conf.CONFIG_SET('HAVE_STRTOLL'): conf.CHECK_CODE(''' long long nb = strtoll("Text", NULL, 0); if (errno == EINVAL) { return 0; } else { return 1; } ''', msg="Checking correct behavior of strtoll", headers = 'errno.h', execute = True, define = 'HAVE_BSD_STRTOLL', ) conf.CHECK_FUNCS('if_nametoindex strerror_r') conf.CHECK_FUNCS('syslog') conf.CHECK_FUNCS('gai_strerror get_current_dir_name') conf.CHECK_FUNCS('timegm getifaddrs freeifaddrs mmap setgroups syscall setsid') conf.CHECK_FUNCS('getgrent_r getgrgid_r getgrnam_r getgrouplist getpagesize') conf.CHECK_FUNCS('getpwent_r getpwnam_r getpwuid_r epoll_create') conf.CHECK_FUNCS('port_create') conf.CHECK_FUNCS('getprogname') if not conf.CHECK_FUNCS('copy_file_range'): conf.CHECK_CODE(''' #include #include syscall(SYS_copy_file_range,0,NULL,0,NULL,0,0); ''', 'HAVE_SYSCALL_COPY_FILE_RANGE', msg='Checking whether we have copy_file_range system call') if conf.CONFIG_SET('HAVE_COPY_FILE_RANGE') or conf.CONFIG_SET('HAVE_SYSCALL_COPY_FILE_RANGE'): conf.DEFINE('USE_COPY_FILE_RANGE', 1) conf.SET_TARGET_TYPE('attr', 'EMPTY') xattr_headers='sys/attributes.h attr/xattr.h sys/xattr.h' # default to 1, we set it to 0 if we don't find any EA implementation below: conf.DEFINE('HAVE_XATTR_SUPPORT', 1) if conf.CHECK_FUNCS_IN('getxattr', 'attr', checklibc=True, headers=xattr_headers): conf.DEFINE('HAVE_XATTR_XATTR', 1) # Darwin has extra options to xattr-family functions conf.CHECK_CODE('getxattr(NULL, NULL, NULL, 0, 0, 0)', headers=xattr_headers, local_include=False, define='XATTR_ADDITIONAL_OPTIONS', msg="Checking whether xattr interface takes additional options") elif conf.CHECK_FUNCS_IN('attr_listf', 'attr', checklibc=True, headers=xattr_headers): conf.DEFINE('HAVE_XATTR_ATTR', 1) elif conf.CHECK_FUNCS('extattr_list_fd'): conf.DEFINE('HAVE_XATTR_EXTATTR', 1) elif conf.CHECK_FUNCS('flistea'): conf.DEFINE('HAVE_XATTR_EA', 1) elif not conf.CHECK_FUNCS('attropen'): conf.DEFINE('HAVE_XATTR_SUPPORT', 0) conf.CHECK_FUNCS_IN('dlopen dlsym dlerror dlclose', 'dl', checklibc=True, headers='dlfcn.h dl.h') conf.CHECK_C_PROTOTYPE('dlopen', 'void *dlopen(const char* filename, unsigned int flags)', define='DLOPEN_TAKES_UNSIGNED_FLAGS', headers='dlfcn.h dl.h') # # Check for clock_gettime and fdatasync # # First check libc to avoid linking libreplace against librt. 
# if conf.CHECK_FUNCS('fdatasync'): # some systems are missing the declaration conf.CHECK_DECLS('fdatasync') else: if conf.CHECK_FUNCS_IN('fdatasync', 'rt'): # some systems are missing the declaration conf.CHECK_DECLS('fdatasync') has_clock_gettime = False if conf.CHECK_FUNCS('clock_gettime'): has_clock_gettime = True if not has_clock_gettime: if conf.CHECK_FUNCS_IN('clock_gettime', 'rt', checklibc=True): has_clock_gettime = True if has_clock_gettime: for c in ['CLOCK_MONOTONIC', 'CLOCK_PROCESS_CPUTIME_ID', 'CLOCK_REALTIME']: conf.CHECK_CODE(''' #if TIME_WITH_SYS_TIME # include # include #else # if HAVE_SYS_TIME_H # include # else # include # endif #endif clockid_t clk = %s''' % c, 'HAVE_%s' % c, msg='Checking whether the clock_gettime clock ID %s is available' % c) conf.CHECK_TYPE('struct timespec', headers='sys/time.h time.h') # these headers need to be tested as a group on freebsd conf.CHECK_HEADERS(headers='sys/socket.h net/if.h', together=True) conf.CHECK_HEADERS(headers='netinet/in.h arpa/nameser.h resolv.h', together=True) conf.CHECK_FUNCS_IN('res_search', 'resolv', checklibc=True, headers='netinet/in.h arpa/nameser.h resolv.h') # try to find libintl (if --without-gettext is not given) conf.env.intl_libs='' if not Options.options.disable_gettext: conf.CHECK_HEADERS('libintl.h') conf.CHECK_LIB('intl') conf.CHECK_DECLS('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', headers="libintl.h") # *textdomain functions are not strictly necessary conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', '', checklibc=True, headers='libintl.h') # gettext and dgettext must exist # on some systems (the ones with glibc, those are in libc) if conf.CHECK_FUNCS_IN('dgettext gettext', '', checklibc=True, headers='libintl.h'): # save for dependency definitions conf.env.intl_libs='' # others (e.g. FreeBSD) have separate libintl elif conf.CHECK_FUNCS_IN('dgettext gettext', 'intl', checklibc=False, headers='libintl.h'): # save for dependency definitions conf.env.intl_libs='intl' # recheck with libintl conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', 'intl', checklibc=False, headers='libintl.h') else: # Some hosts need lib iconv for linking with lib intl # So we try with flags just in case it helps. oldflags = list(conf.env['EXTRA_LDFLAGS']); conf.env['EXTRA_LDFLAGS'].extend(["-liconv"]) conf.CHECK_FUNCS_IN('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', 'intl', checklibc=False, headers='libintl.h') conf.env['EXTRA_LDFLAGS'] = oldflags if conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DGETTEXT']: # save for dependency definitions conf.env.intl_libs='iconv intl' # did we find both prototypes and a library to link against? # if not, unset the detected values (see Bug #9911) if not (conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DECL_GETTEXT']): conf.undefine('HAVE_GETTEXT') conf.undefine('HAVE_DECL_GETTEXT') if not (conf.env['HAVE_DGETTEXT'] and conf.env['HAVE_DECL_DGETTEXT']): conf.undefine('HAVE_DGETTEXT') conf.undefine('HAVE_DECL_DGETTEXT') conf.CHECK_FUNCS_IN('pthread_create', 'pthread', checklibc=True, headers='pthread.h') PTHREAD_CFLAGS='error' PTHREAD_LDFLAGS='error' if PTHREAD_LDFLAGS == 'error': # Check if pthread_attr_init() is provided by libc first! 
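# Each rung below only fires while PTHREAD_LDFLAGS is still 'error', so the
# first combination that links wins: plain libc with -D_REENTRANT, then
# -lpthread, -lpthreads and -lc_r, with a final pthread_mutex_lock probe for
# platforms (notably HP-UX) where the pthread_attr_init check itself fails.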
if conf.CHECK_FUNCS('pthread_attr_init'): PTHREAD_CFLAGS='-D_REENTRANT' PTHREAD_LDFLAGS='' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthread'): PTHREAD_CFLAGS='-D_REENTRANT -D_POSIX_PTHREAD_SEMANTICS' PTHREAD_LDFLAGS='-lpthread' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthreads'): PTHREAD_CFLAGS='-D_THREAD_SAFE' PTHREAD_LDFLAGS='-lpthreads' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_attr_init', 'c_r'): PTHREAD_CFLAGS='-D_THREAD_SAFE -pthread' PTHREAD_LDFLAGS='-pthread' # especially for HP-UX, where the CHECK_FUNC macro fails to test for # pthread_attr_init. On pthread_mutex_lock it works there... if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_mutex_lock', 'pthread'): PTHREAD_CFLAGS='-D_REENTRANT' PTHREAD_LDFLAGS='-lpthread' if PTHREAD_CFLAGS != 'error' and PTHREAD_LDFLAGS != 'error': if conf.CONFIG_SET('replace_add_global_pthread'): conf.ADD_CFLAGS(PTHREAD_CFLAGS) conf.ADD_LDFLAGS(PTHREAD_LDFLAGS) conf.CHECK_HEADERS('pthread.h') conf.DEFINE('HAVE_PTHREAD', '1') if conf.CONFIG_SET('HAVE_PTHREAD'): conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust', 'pthread', checklibc=True, headers='pthread.h') if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST'): conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust_np', 'pthread', checklibc=True, headers='pthread.h') conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST', headers='pthread.h') if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST'): conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST_NP', headers='pthread.h') conf.CHECK_FUNCS_IN('pthread_mutex_consistent', 'pthread', checklibc=True, headers='pthread.h') if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT'): conf.CHECK_FUNCS_IN('pthread_mutex_consistent_np', 'pthread', checklibc=True, headers='pthread.h') if ((conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST') or conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP')) and (conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST') or conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP')) and (conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT') or conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT_NP'))): conf.DEFINE('HAVE_ROBUST_MUTEXES', 1) # __thread is available since 2002 in gcc. 
conf.CHECK_CODE(''' __thread int tls; int main(void) { return 0; } ''', 'HAVE___THREAD', addmain=False, msg='Checking for __thread local storage') conf.CHECK_FUNCS_IN('crypt', 'crypt', checklibc=True) conf.CHECK_FUNCS_IN('crypt_r', 'crypt', checklibc=True) conf.CHECK_FUNCS_IN('crypt_rn', 'crypt', checklibc=True) conf.CHECK_VARIABLE('rl_event_hook', define='HAVE_DECL_RL_EVENT_HOOK', always=True, headers='readline.h readline/readline.h readline/history.h') conf.CHECK_VARIABLE('program_invocation_short_name', headers='errno.h') conf.CHECK_DECLS('snprintf vsnprintf asprintf vasprintf') conf.CHECK_DECLS('errno', headers='errno.h', reverse=True) conf.CHECK_DECLS('EWOULDBLOCK', headers='errno.h') conf.CHECK_DECLS('environ', reverse=True, headers='unistd.h') conf.CHECK_DECLS('getgrent_r getpwent_r', reverse=True, headers='pwd.h grp.h') conf.CHECK_DECLS('pread pwrite setenv setresgid setresuid', reverse=True) if conf.CONFIG_SET('HAVE_EPOLL_CREATE') and conf.CONFIG_SET('HAVE_SYS_EPOLL_H'): conf.DEFINE('HAVE_EPOLL', 1) if conf.CONFIG_SET('HAVE_PORT_CREATE') and conf.CONFIG_SET('HAVE_PORT_H'): conf.DEFINE('HAVE_SOLARIS_PORTS', 1) if conf.CHECK_FUNCS('eventfd', headers='sys/eventfd.h'): conf.DEFINE('HAVE_EVENTFD', 1) conf.CHECK_HEADERS('poll.h') conf.CHECK_FUNCS('poll') conf.CHECK_FUNCS('strptime') conf.CHECK_DECLS('strptime', headers='time.h') conf.CHECK_CODE('''#define LIBREPLACE_CONFIGURE_TEST_STRPTIME #include "tests/strptime.c"''', define='HAVE_WORKING_STRPTIME', execute=True, addmain=False, msg='Checking for working strptime') conf.CHECK_C_PROTOTYPE('gettimeofday', 'int gettimeofday(struct timeval *tv, struct timezone *tz)', define='HAVE_GETTIMEOFDAY_TZ', headers='sys/time.h') conf.CHECK_C_PROTOTYPE('gettimeofday', 'int gettimeofday(struct timeval *tv, void *tz)', define='HAVE_GETTIMEOFDAY_TZ_VOID', headers='sys/time.h') conf.CHECK_CODE('#include "tests/snprintf.c"', define="HAVE_C99_VSNPRINTF", execute=True, addmain=False, msg="Checking for C99 vsnprintf") conf.CHECK_CODE('#include "tests/shared_mmap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_SHARED_MMAP', msg="Checking for HAVE_SHARED_MMAP") conf.CHECK_CODE('#include "tests/shared_mremap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_MREMAP', msg="Checking for HAVE_MREMAP") # OpenBSD (and I've heard HPUX) doesn't sync between mmap and write. # FIXME: Anything other than a 0 or 1 exit code should abort configure! 
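# The probe below compiles and runs tests/incoherent_mmap.c (execute=True)
# and uses the result to define HAVE_INCOHERENT_MMAP, so later code knows
# whether a write() to a file can be assumed to show up in an existing
# mmap() of the same file on this platform.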
conf.CHECK_CODE('#include "tests/incoherent_mmap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_INCOHERENT_MMAP', msg="Checking for HAVE_INCOHERENT_MMAP") conf.SAMBA_BUILD_ENV() conf.CHECK_CODE(''' typedef struct {unsigned x;} FOOBAR; #define X_FOOBAR(x) ((FOOBAR) { x }) #define FOO_ONE X_FOOBAR(1) FOOBAR f = FOO_ONE; static const struct { FOOBAR y; } f2[] = { {FOO_ONE} }; static const FOOBAR f3[] = {FOO_ONE}; ''', define='HAVE_IMMEDIATE_STRUCTURES') conf.CHECK_CODE('mkdir("foo",0777)', define='HAVE_MKDIR_MODE', headers='sys/stat.h') # we need the st_rdev test under two names conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_STRUCT_STAT_ST_RDEV', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_ST_RDEV', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', 'ss_family', headers='sys/socket.h netinet/in.h') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', '__ss_family', headers='sys/socket.h netinet/in.h') if conf.CHECK_STRUCTURE_MEMBER('struct sockaddr', 'sa_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCKADDR_SA_LEN'): # the old build system produced both defines conf.DEFINE('HAVE_STRUCT_SOCKADDR_SA_LEN', 1) conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in', 'sin_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCK_SIN_LEN') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in6', 'sin6_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCK_SIN6_LEN') conf.CHECK_CODE('struct sockaddr_un sunaddr; sunaddr.sun_family = AF_UNIX;', define='HAVE_UNIXSOCKET', headers='sys/socket.h sys/un.h') conf.CHECK_CODE(''' struct stat st; char tpl[20]="/tmp/test.XXXXXX"; char tpl2[20]="/tmp/test.XXXXXX"; int fd = mkstemp(tpl); int fd2 = mkstemp(tpl2); if (fd == -1) { if (fd2 != -1) { unlink(tpl2); } exit(1); } if (fd2 == -1) exit(1); unlink(tpl); unlink(tpl2); if (fstat(fd, &st) != 0) exit(1); if ((st.st_mode & 0777) != 0600) exit(1); if (strcmp(tpl, "/tmp/test.XXXXXX") == 0) { exit(1); } if (strcmp(tpl, tpl2) == 0) { exit(1); } exit(0); ''', define='HAVE_SECURE_MKSTEMP', execute=True, mandatory=True) # lets see if we get a mandatory failure for this one # look for a method of finding the list of network interfaces for method in ['HAVE_IFACE_GETIFADDRS', 'HAVE_IFACE_AIX', 'HAVE_IFACE_IFCONF', 'HAVE_IFACE_IFREQ']: bsd_for_strlcpy = '' if strlcpy_in_bsd: bsd_for_strlcpy = ' bsd' if conf.CHECK_CODE(''' #define %s 1 #define NO_CONFIG_H 1 #define AUTOCONF_TEST 1 #include "replace.c" #include "inet_ntop.c" #include "snprintf.c" #include "getifaddrs.c" #define getifaddrs_test main #include "tests/getifaddrs.c" ''' % method, method, lib='nsl socket' + bsd_for_strlcpy, addmain=False, execute=True): break conf.RECURSE('system') conf.SAMBA_CONFIG_H() if conf.CHECK_FUNCS('strerror_r'): # Check if strerror_r is XSI-Compatable, the default GNU implementation # is not conf.CHECK_CODE('int strerror_r(int errnum, char *buf, size_t buflen);', 'STRERROR_R_XSI_NOT_GNU', headers='string.h', addmain=False, link=False, msg="Checking for XSI (rather than GNU) prototype for strerror_r") REPLACEMENT_FUNCTIONS = { 'replace.c': ['ftruncate', 'strlcpy', 'strlcat', 'mktime', 'initgroups', 'memmove', 'strdup', 'setlinebuf', 'vsyslog', 'strnlen', 'strndup', 'waitpid', 'seteuid', 'setegid', 'chroot', 'mkstemp', 'mkdtemp', 'pread', 'pwrite', 'strcasestr', 'strsep', 'strtok_r', 'strtoll', 'strtoull', 'setenv', 'unsetenv', 'utime', 'utimes', 'dup2', 'chown', 'link', 'readlink', 'symlink', 'lchown', 'realpath', 'memmem', 
'vdprintf', 'dprintf', 'get_current_dir_name', 'copy_file_range', 'strerror_r', 'clock_gettime', 'memset_s'], 'timegm.c': ['timegm'], # Note: C99_VSNPRINTF is not a function, but a special condition # for replacement 'snprintf.c': ['C99_VSNPRINTF', 'snprintf', 'vsnprintf', 'asprintf', 'vasprintf'], # Note: WORKING_STRPTIME is not a function, but a special condition # for replacement 'strptime.c': ['WORKING_STRPTIME', 'strptime'], } def build(bld): bld.RECURSE('buildtools/wafsamba') REPLACE_HOSTCC_SOURCE = '' for filename in REPLACEMENT_FUNCTIONS.keys(): for function in REPLACEMENT_FUNCTIONS[filename]: if not bld.CONFIG_SET('HAVE_%s' % function.upper()): REPLACE_HOSTCC_SOURCE += ' %s' % filename break extra_libs = '' if bld.CONFIG_SET('HAVE_LIBBSD'): extra_libs += ' bsd' if bld.CONFIG_SET('HAVE_LIBRT'): extra_libs += ' rt' if bld.CONFIG_SET('REPLACE_REQUIRES_LIBSOCKET_LIBNSL'): extra_libs += ' socket nsl' if not bld.CONFIG_SET('HAVE_CLOSEFROM'): REPLACE_HOSTCC_SOURCE += ' closefrom.c' bld.SAMBA_SUBSYSTEM('LIBREPLACE_HOSTCC', REPLACE_HOSTCC_SOURCE, use_hostcc=True, use_global_deps=False, group='hostcc_base_build_main', deps = extra_libs ) REPLACE_SOURCE = REPLACE_HOSTCC_SOURCE REPLACE_SOURCE += ' cwrap.c' if not bld.CONFIG_SET('HAVE_DLOPEN'): REPLACE_SOURCE += ' dlfcn.c' if not bld.CONFIG_SET('HAVE_POLL'): REPLACE_SOURCE += ' poll.c' if not bld.CONFIG_SET('HAVE_SOCKETPAIR'): REPLACE_SOURCE += ' socketpair.c' if not bld.CONFIG_SET('HAVE_CONNECT'): REPLACE_SOURCE += ' socket.c' if not bld.CONFIG_SET('HAVE_GETIFADDRS'): REPLACE_SOURCE += ' getifaddrs.c' if not bld.CONFIG_SET('HAVE_GETADDRINFO'): REPLACE_SOURCE += ' getaddrinfo.c' if not bld.CONFIG_SET('HAVE_INET_NTOA'): REPLACE_SOURCE += ' inet_ntoa.c' if not bld.CONFIG_SET('HAVE_INET_ATON'): REPLACE_SOURCE += ' inet_aton.c' if not bld.CONFIG_SET('HAVE_INET_NTOP'): REPLACE_SOURCE += ' inet_ntop.c' if not bld.CONFIG_SET('HAVE_INET_PTON'): REPLACE_SOURCE += ' inet_pton.c' if not bld.CONFIG_SET('HAVE_GETXATTR') or bld.CONFIG_SET('XATTR_ADDITIONAL_OPTIONS'): REPLACE_SOURCE += ' xattr.c' if not bld.CONFIG_SET('HAVE_CLOSEFROM'): REPLACE_SOURCE += ' closefrom.c' bld.SAMBA_LIBRARY('replace', source=REPLACE_SOURCE, group='base_libraries', # FIXME: Ideally symbols should be hidden here so they # don't appear in the global namespace when Samba # libraries are loaded, but this doesn't appear to work # at the moment: # hide_symbols=bld.BUILTIN_LIBRARY('replace'), private_library=True, deps='dl attr' + extra_libs) replace_test_cflags = '' if bld.CONFIG_SET('HAVE_WNO_FORMAT_TRUNCATION'): replace_test_cflags += " -Wno-format-truncation" bld.SAMBA_SUBSYSTEM('replace-test', source='''tests/testsuite.c tests/strptime.c tests/os2_delete.c tests/getifaddrs.c''', deps='replace', cflags=replace_test_cflags) bld.SAMBA_BINARY('replace_testsuite', source='tests/main.c', deps='replace replace-test', install=False) # build replacements for stdint.h and stdbool.h if needed bld.SAMBA_GENERATOR('replace_stdint_h', rule='cp ${SRC} ${TGT}', source='hdr_replace.h', target='stdint.h', enabled = not bld.CONFIG_SET('HAVE_STDINT_H')) bld.SAMBA_GENERATOR('replace_stdbool_h', rule='cp ${SRC} ${TGT}', source='hdr_replace.h', target='stdbool.h', enabled = not bld.CONFIG_SET('HAVE_STDBOOL_H')) bld.SAMBA_SUBSYSTEM('samba_intl', source='', use_global_deps=False,deps=bld.env.intl_libs) def testonly(ctx): '''run talloc testsuite''' import samba_utils samba_utils.ADD_LD_LIBRARY_PATH('bin/shared') samba_utils.ADD_LD_LIBRARY_PATH('bin/shared/private') cmd = os.path.join(Context.g_module.out, 
'replace_testsuite') ret = samba_utils.RUN_COMMAND(cmd) print("testsuite returned %d" % ret) sys.exit(ret) # WAF doesn't build the unit tests for this, maybe because they don't link with talloc? # This forces it def test(ctx): Options.commands.append('build') Options.commands.append('testonly') def dist(): '''makes a tarball for distribution''' samba_dist.dist() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/replace/xattr.c0000660000000000000000000005267500000000000016541 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. replacement routines for xattr implementations Copyright (C) Jeremy Allison 1998-2005 Copyright (C) Timur Bakeyev 2005 Copyright (C) Bjoern Jacke 2006-2007 Copyright (C) Herb Lewis 2003 Copyright (C) Andrew Bartlett 2012 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #define UID_WRAPPER_NOT_REPLACE #include "replace.h" #include "system/filesys.h" #include "system/dir.h" /******** Solaris EA helper function prototypes ********/ #ifdef HAVE_ATTROPEN #define SOLARIS_ATTRMODE S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP static int solaris_write_xattr(int attrfd, const char *value, size_t size); static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size); static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size); static int solaris_unlinkat(int attrdirfd, const char *name); static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode); static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode); #endif /************************************************************************** Wrappers for extented attribute calls. Based on the Linux package with support for IRIX and (Net|Free)BSD also. Expand as other systems have them. ****************************************************************************/ ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return getxattr(path, name, value, size); #else /* So that we do not recursively call this function */ #undef getxattr int options = 0; return getxattr(path, name, value, size, 0, options); #endif #elif defined(HAVE_XATTR_EA) return getea(path, name, value, size); #elif defined(HAVE_XATTR_EXTATTR) ssize_t retval; int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } /* * The BSD implementation has a nasty habit of silently truncating * the returned value to the size of the buffer, so we have to check * that the buffer is large enough to fit the returned value. 
*/ if((retval=extattr_get_file(path, attrnamespace, attrname, NULL, 0)) >= 0) { if (size == 0) { return retval; } else if (retval > size) { errno = ERANGE; return -1; } if((retval=extattr_get_file(path, attrnamespace, attrname, value, size)) >= 0) return retval; } return -1; #elif defined(HAVE_XATTR_ATTR) int retval, flags = 0; int valuelength = (int)size; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; retval = attr_get(path, attrname, (char *)value, &valuelength, flags); if (size == 0 && retval == -1 && errno == E2BIG) { return valuelength; } return retval ? retval : valuelength; #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrfd = solaris_attropen(path, name, O_RDONLY, 0); if (attrfd >= 0) { ret = solaris_read_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fgetxattr(filedes, name, value, size); #else /* So that we do not recursively call this function */ #undef fgetxattr int options = 0; return fgetxattr(filedes, name, value, size, 0, options); #endif #elif defined(HAVE_XATTR_EA) return fgetea(filedes, name, value, size); #elif defined(HAVE_XATTR_EXTATTR) ssize_t retval; int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } if((retval=extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0)) >= 0) { if (size == 0) { return retval; } else if (retval > size) { errno = ERANGE; return -1; } if((retval=extattr_get_fd(filedes, attrnamespace, attrname, value, size)) >= 0) return retval; } return -1; #elif defined(HAVE_XATTR_ATTR) int retval, flags = 0; int valuelength = (int)size; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; retval = attr_getf(filedes, attrname, (char *)value, &valuelength, flags); if (size == 0 && retval == -1 && errno == E2BIG) { return valuelength; } return retval ? 
retval : valuelength; #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrfd = solaris_openat(filedes, name, O_RDONLY|O_XATTR, 0); if (attrfd >= 0) { ret = solaris_read_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } #if defined(HAVE_XATTR_EXTATTR) #define EXTATTR_PREFIX(s) (s), (sizeof((s))-1) static struct { int space; const char *name; size_t len; } extattr[] = { { EXTATTR_NAMESPACE_SYSTEM, EXTATTR_PREFIX("system.") }, { EXTATTR_NAMESPACE_USER, EXTATTR_PREFIX("user.") }, }; typedef union { const char *path; int filedes; } extattr_arg; static ssize_t bsd_attr_list (int type, extattr_arg arg, char *list, size_t size) { ssize_t list_size, total_size = 0; int i, t, len; char *buf; /* Iterate through extattr(2) namespaces */ for(t = 0; t < ARRAY_SIZE(extattr); t++) { if (t != EXTATTR_NAMESPACE_USER && geteuid() != 0) { /* ignore all but user namespace when we are not root, see bug 10247 */ continue; } switch(type) { case 0: list_size = extattr_list_file(arg.path, extattr[t].space, list, size); break; case 1: list_size = extattr_list_link(arg.path, extattr[t].space, list, size); break; case 2: list_size = extattr_list_fd(arg.filedes, extattr[t].space, list, size); break; default: errno = ENOSYS; return -1; } /* Some error happend. Errno should be set by the previous call */ if(list_size < 0) return -1; /* No attributes */ if(list_size == 0) continue; /* XXX: Call with an empty buffer may be used to calculate necessary buffer size. Unfortunately, we can't say, how many attributes were returned, so here is the potential problem with the emulation. */ if(list == NULL) { /* Take the worse case of one char attribute names - two bytes per name plus one more for sanity. */ total_size += list_size + (list_size/2 + 1)*extattr[t].len; continue; } /* Count necessary offset to fit namespace prefixes */ len = 0; for(i = 0; i < list_size; i += list[i] + 1) len += extattr[t].len; total_size += list_size + len; /* Buffer is too small to fit the results */ if(total_size > size) { errno = ERANGE; return -1; } /* Shift results back, so we can prepend prefixes */ buf = (char *)memmove(list + len, list, list_size); for(i = 0; i < list_size; i += len + 1) { len = buf[i]; strncpy(list, extattr[t].name, extattr[t].len + 1); list += extattr[t].len; strncpy(list, buf + i + 1, len); list[len] = '\0'; list += len + 1; } size -= total_size; } return total_size; } #endif #if defined(HAVE_XATTR_ATTR) && (defined(HAVE_SYS_ATTRIBUTES_H) || defined(HAVE_ATTR_ATTRIBUTES_H)) static char attr_buffer[ATTR_MAX_VALUELEN]; static ssize_t irix_attr_list(const char *path, int filedes, char *list, size_t size, int flags) { int retval = 0, index; attrlist_cursor_t *cursor = 0; int total_size = 0; attrlist_t * al = (attrlist_t *)attr_buffer; attrlist_ent_t *ae; size_t ent_size, left = size; char *bp = list; while (true) { if (filedes) retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); else retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); if (retval) break; for (index = 0; index < al->al_count; index++) { ae = ATTR_ENTRY(attr_buffer, index); ent_size = strlen(ae->a_name) + sizeof("user."); if (left >= ent_size) { strncpy(bp, "user.", sizeof("user.")); strncat(bp, ae->a_name, ent_size - sizeof("user.")); bp += ent_size; left -= ent_size; } else if (size) { errno = ERANGE; retval = -1; break; } total_size += ent_size; } if (al->al_more == 0) break; } if (retval == 0) { flags |= ATTR_ROOT; cursor = 0; while (true) { if (filedes) retval = 
#if defined(HAVE_XATTR_ATTR) && (defined(HAVE_SYS_ATTRIBUTES_H) || defined(HAVE_ATTR_ATTRIBUTES_H))
static char attr_buffer[ATTR_MAX_VALUELEN];

static ssize_t irix_attr_list(const char *path, int filedes, char *list, size_t size, int flags)
{
	int retval = 0, index;
	attrlist_cursor_t *cursor = 0;
	int total_size = 0;
	attrlist_t * al = (attrlist_t *)attr_buffer;
	attrlist_ent_t *ae;
	size_t ent_size, left = size;
	char *bp = list;

	while (true) {
		if (filedes)
			retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
		else
			retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
		if (retval) break;
		for (index = 0; index < al->al_count; index++) {
			ae = ATTR_ENTRY(attr_buffer, index);
			ent_size = strlen(ae->a_name) + sizeof("user.");
			if (left >= ent_size) {
				strncpy(bp, "user.", sizeof("user."));
				strncat(bp, ae->a_name, ent_size - sizeof("user."));
				bp += ent_size;
				left -= ent_size;
			} else if (size) {
				errno = ERANGE;
				retval = -1;
				break;
			}
			total_size += ent_size;
		}
		if (al->al_more == 0) break;
	}
	if (retval == 0) {
		flags |= ATTR_ROOT;
		cursor = 0;
		while (true) {
			if (filedes)
				retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
			else
				retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
			if (retval) break;
			for (index = 0; index < al->al_count; index++) {
				ae = ATTR_ENTRY(attr_buffer, index);
				ent_size = strlen(ae->a_name) + sizeof("system.");
				if (left >= ent_size) {
					strncpy(bp, "system.", sizeof("system."));
					strncat(bp, ae->a_name, ent_size - sizeof("system."));
					bp += ent_size;
					left -= ent_size;
				} else if (size) {
					errno = ERANGE;
					retval = -1;
					break;
				}
				total_size += ent_size;
			}
			if (al->al_more == 0) break;
		}
	}
	return (ssize_t)(retval ? retval : total_size);
}
#endif

ssize_t rep_listxattr (const char *path, char *list, size_t size)
{
#if defined(HAVE_XATTR_XATTR)
#ifndef XATTR_ADDITIONAL_OPTIONS
	return listxattr(path, list, size);
#else
/* So that we do not recursively call this function */
#undef listxattr
	int options = 0;
	return listxattr(path, list, size, options);
#endif
#elif defined(HAVE_XATTR_EA)
	return listea(path, list, size);
#elif defined(HAVE_XATTR_EXTATTR)
	extattr_arg arg;
	arg.path = path;
	return bsd_attr_list(0, arg, list, size);
#elif defined(HAVE_XATTR_ATTR) && defined(HAVE_SYS_ATTRIBUTES_H)
	return irix_attr_list(path, 0, list, size, 0);
#elif defined(HAVE_ATTROPEN)
	ssize_t ret = -1;
	int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0);
	if (attrdirfd >= 0) {
		ret = solaris_list_xattr(attrdirfd, list, size);
		close(attrdirfd);
	}
	return ret;
#else
	errno = ENOSYS;
	return -1;
#endif
}

ssize_t rep_flistxattr (int filedes, char *list, size_t size)
{
#if defined(HAVE_XATTR_XATTR)
#ifndef XATTR_ADDITIONAL_OPTIONS
	return flistxattr(filedes, list, size);
#else
/* So that we do not recursively call this function */
#undef flistxattr
	int options = 0;
	return flistxattr(filedes, list, size, options);
#endif
#elif defined(HAVE_XATTR_EA)
	return flistea(filedes, list, size);
#elif defined(HAVE_XATTR_EXTATTR)
	extattr_arg arg;
	arg.filedes = filedes;
	return bsd_attr_list(2, arg, list, size);
#elif defined(HAVE_XATTR_ATTR)
	return irix_attr_list(NULL, filedes, list, size, 0);
#elif defined(HAVE_ATTROPEN)
	ssize_t ret = -1;
	int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0);
	if (attrdirfd >= 0) {
		ret = solaris_list_xattr(attrdirfd, list, size);
		close(attrdirfd);
	}
	return ret;
#else
	errno = ENOSYS;
	return -1;
#endif
}

int rep_removexattr (const char *path, const char *name)
{
#if defined(HAVE_XATTR_XATTR)
#ifndef XATTR_ADDITIONAL_OPTIONS
	return removexattr(path, name);
#else
/* So that we do not recursively call this function */
#undef removexattr
	int options = 0;
	return removexattr(path, name, options);
#endif
#elif defined(HAVE_XATTR_EA)
	return removeea(path, name);
#elif defined(HAVE_XATTR_EXTATTR)
	int attrnamespace;
	const char *attrname;

	if (strncmp(name, "system.", 7) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_SYSTEM;
		attrname = name + 7;
	} else if (strncmp(name, "user.", 5) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_USER;
		attrname = name + 5;
	} else {
		errno = EINVAL;
		return -1;
	}

	return extattr_delete_file(path, attrnamespace, attrname);
#elif defined(HAVE_XATTR_ATTR)
	int flags = 0;
	char *attrname = strchr(name,'.') + 1;

	if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT;

	return attr_remove(path, attrname, flags);
#elif defined(HAVE_ATTROPEN)
	int ret = -1;
	int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0);
	if (attrdirfd >= 0) {
		ret = solaris_unlinkat(attrdirfd, name);
		close(attrdirfd);
	}
	return ret;
#else
	errno = ENOSYS;
	return -1;
#endif
}
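#ifdef REPLACE_XATTR_EXAMPLES
/*
 * Illustrative sketch only (hypothetical REPLACE_XATTR_EXAMPLES guard).
 * All of the list functions above follow the Linux listxattr() size
 * negotiation: a call with size == 0 returns the number of bytes needed,
 * after which the caller allocates a buffer and calls again.  This shows
 * that two-step pattern against rep_listxattr(); error handling is kept
 * minimal, and malloc()/free() are assumed to be available via the
 * headers this file already includes.
 */
static char *example_list_names(const char *path, ssize_t *len_out)
{
	ssize_t len = rep_listxattr(path, NULL, 0);
	char *buf;

	if (len < 0) {
		return NULL;
	}
	buf = malloc(len ? len : 1);
	if (buf == NULL) {
		return NULL;
	}
	len = rep_listxattr(path, buf, len);
	if (len < 0) {
		free(buf);
		return NULL;
	}
	*len_out = len;	/* buffer holds NUL-terminated names back to back */
	return buf;
}
#endif /* REPLACE_XATTR_EXAMPLES */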
int rep_fremovexattr (int filedes, const char *name)
{
#if defined(HAVE_XATTR_XATTR)
#ifndef XATTR_ADDITIONAL_OPTIONS
	return fremovexattr(filedes, name);
#else
/* So that we do not recursively call this function */
#undef fremovexattr
	int options = 0;
	return fremovexattr(filedes, name, options);
#endif
#elif defined(HAVE_XATTR_EA)
	return fremoveea(filedes, name);
#elif defined(HAVE_XATTR_EXTATTR)
	int attrnamespace;
	const char *attrname;

	if (strncmp(name, "system.", 7) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_SYSTEM;
		attrname = name + 7;
	} else if (strncmp(name, "user.", 5) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_USER;
		attrname = name + 5;
	} else {
		errno = EINVAL;
		return -1;
	}

	return extattr_delete_fd(filedes, attrnamespace, attrname);
#elif defined(HAVE_XATTR_ATTR)
	int flags = 0;
	char *attrname = strchr(name,'.') + 1;

	if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT;

	return attr_removef(filedes, attrname, flags);
#elif defined(HAVE_ATTROPEN)
	int ret = -1;
	int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0);
	if (attrdirfd >= 0) {
		ret = solaris_unlinkat(attrdirfd, name);
		close(attrdirfd);
	}
	return ret;
#else
	errno = ENOSYS;
	return -1;
#endif
}
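#if defined(REPLACE_XATTR_EXAMPLES) && defined(HAVE_XATTR_EA)
/*
 * Illustrative sketch only (hypothetical REPLACE_XATTR_EXAMPLES guard).
 * Where the native API has no XATTR_CREATE/XATTR_REPLACE equivalent,
 * rep_setxattr() and rep_fsetxattr() below emulate the flags by probing
 * for the attribute with a zero-length get first: XATTR_REPLACE fails
 * with ENOATTR if the attribute is missing, XATTR_CREATE fails with
 * EEXIST if it is already there.  This helper shows that probe step in
 * isolation for the AIX-style *ea() interface.
 */
static int example_check_set_flags(const char *path, const char *name, int flags)
{
	ssize_t probe = getea(path, name, NULL, 0);

	if (probe < 0) {
		if ((flags & XATTR_REPLACE) && errno == ENOATTR) {
			return -1;	/* must exist, but does not */
		}
	} else {
		if (flags & XATTR_CREATE) {
			errno = EEXIST;	/* must not exist, but does */
			return -1;
		}
	}
	return 0;	/* OK to go ahead with the set */
}
#endif /* REPLACE_XATTR_EXAMPLES && HAVE_XATTR_EA */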
int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags)
{
	int retval = -1;
#if defined(HAVE_XATTR_XATTR)
#ifndef XATTR_ADDITIONAL_OPTIONS
	retval = setxattr(path, name, value, size, flags);
	if (retval < 0) {
		if (errno == ENOSPC || errno == E2BIG) {
			errno = ENAMETOOLONG;
		}
	}
	return retval;
#else
/* So that we do not recursively call this function */
#undef setxattr
	retval = setxattr(path, name, value, size, 0, flags);
	if (retval < 0) {
		if (errno == E2BIG) {
			errno = ENAMETOOLONG;
		}
	}
	return retval;
#endif
#elif defined(HAVE_XATTR_EA)
	if (flags) {
		retval = getea(path, name, NULL, 0);
		if (retval < 0) {
			if (flags & XATTR_REPLACE && errno == ENOATTR) {
				return -1;
			}
		} else {
			if (flags & XATTR_CREATE) {
				errno = EEXIST;
				return -1;
			}
		}
	}
	retval = setea(path, name, value, size, 0);
	return retval;
#elif defined(HAVE_XATTR_EXTATTR)
	int attrnamespace;
	const char *attrname;

	if (strncmp(name, "system.", 7) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_SYSTEM;
		attrname = name + 7;
	} else if (strncmp(name, "user.", 5) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_USER;
		attrname = name + 5;
	} else {
		errno = EINVAL;
		return -1;
	}

	if (flags) {
		/* Check attribute existence */
		retval = extattr_get_file(path, attrnamespace, attrname, NULL, 0);
		if (retval < 0) {
			/* REPLACE attribute, that doesn't exist */
			if (flags & XATTR_REPLACE && errno == ENOATTR) {
				errno = ENOATTR;
				return -1;
			}
			/* Ignore other errors */
		} else {
			/* CREATE attribute, that already exists */
			if (flags & XATTR_CREATE) {
				errno = EEXIST;
				return -1;
			}
		}
	}

	retval = extattr_set_file(path, attrnamespace, attrname, value, size);
	return (retval < 0) ? -1 : 0;
#elif defined(HAVE_XATTR_ATTR)
	int myflags = 0;
	char *attrname = strchr(name,'.') + 1;

	if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT;
	if (flags & XATTR_CREATE) myflags |= ATTR_CREATE;
	if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE;

	retval = attr_set(path, attrname, (const char *)value, size, myflags);
	if (retval < 0) {
		if (errno == E2BIG) {
			errno = ENAMETOOLONG;
		}
	}
	return retval;
#elif defined(HAVE_ATTROPEN)
	int myflags = O_RDWR;
	int attrfd;
	if (flags & XATTR_CREATE) myflags |= O_EXCL;
	if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT;
	attrfd = solaris_attropen(path, name, myflags, (mode_t) SOLARIS_ATTRMODE);
	if (attrfd >= 0) {
		retval = solaris_write_xattr(attrfd, value, size);
		close(attrfd);
	}
	return retval;
#else
	errno = ENOSYS;
	return -1;
#endif
}
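#ifdef REPLACE_XATTR_EXAMPLES
/*
 * Illustrative sketch only (hypothetical REPLACE_XATTR_EXAMPLES guard).
 * End-to-end use of the wrappers defined above: store a small value
 * under a "user." name and read it back.  On Linux these map directly
 * to setxattr()/getxattr(); on the other platforms they go through the
 * emulation paths in this file.  The attribute name and value are
 * arbitrary examples.
 */
static int example_set_and_get(const char *path)
{
	static const char payload[] = "hello";
	char buf[sizeof(payload)];
	ssize_t len;

	/* Create or overwrite the attribute (flags == 0 means "no constraint"). */
	if (rep_setxattr(path, "user.rep_example", payload, sizeof(payload), 0) < 0) {
		return -1;	/* e.g. ENOTSUP on filesystems without EA support */
	}

	len = rep_getxattr(path, "user.rep_example", buf, sizeof(buf));
	if (len != (ssize_t)sizeof(payload)) {
		return -1;
	}
	return memcmp(buf, payload, sizeof(payload)) == 0 ? 0 : -1;
}
#endif /* REPLACE_XATTR_EXAMPLES */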
int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags)
{
	int retval = -1;
#if defined(HAVE_XATTR_XATTR)
#ifndef XATTR_ADDITIONAL_OPTIONS
	retval = fsetxattr(filedes, name, value, size, flags);
	if (retval < 0) {
		if (errno == ENOSPC) {
			errno = ENAMETOOLONG;
		}
	}
	return retval;
#else
/* So that we do not recursively call this function */
#undef fsetxattr
	retval = fsetxattr(filedes, name, value, size, 0, flags);
	if (retval < 0) {
		if (errno == E2BIG) {
			errno = ENAMETOOLONG;
		}
	}
	return retval;
#endif
#elif defined(HAVE_XATTR_EA)
	if (flags) {
		retval = fgetea(filedes, name, NULL, 0);
		if (retval < 0) {
			if (flags & XATTR_REPLACE && errno == ENOATTR) {
				return -1;
			}
		} else {
			if (flags & XATTR_CREATE) {
				errno = EEXIST;
				return -1;
			}
		}
	}
	retval = fsetea(filedes, name, value, size, 0);
	return retval;
#elif defined(HAVE_XATTR_EXTATTR)
	int attrnamespace;
	const char *attrname;

	if (strncmp(name, "system.", 7) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_SYSTEM;
		attrname = name + 7;
	} else if (strncmp(name, "user.", 5) == 0) {
		attrnamespace = EXTATTR_NAMESPACE_USER;
		attrname = name + 5;
	} else {
		errno = EINVAL;
		return -1;
	}

	if (flags) {
		/* Check attribute existence */
		retval = extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0);
		if (retval < 0) {
			/* REPLACE attribute, that doesn't exist */
			if (flags & XATTR_REPLACE && errno == ENOATTR) {
				errno = ENOATTR;
				return -1;
			}
			/* Ignore other errors */
		} else {
			/* CREATE attribute, that already exists */
			if (flags & XATTR_CREATE) {
				errno = EEXIST;
				return -1;
			}
		}
	}

	retval = extattr_set_fd(filedes, attrnamespace, attrname, value, size);
	return (retval < 0) ? -1 : 0;
#elif defined(HAVE_XATTR_ATTR)
	int myflags = 0;
	char *attrname = strchr(name,'.') + 1;

	if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT;
	if (flags & XATTR_CREATE) myflags |= ATTR_CREATE;
	if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE;

	return attr_setf(filedes, attrname, (const char *)value, size, myflags);
#elif defined(HAVE_ATTROPEN)
	int myflags = O_RDWR | O_XATTR;
	int attrfd;
	if (flags & XATTR_CREATE) myflags |= O_EXCL;
	if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT;
	attrfd = solaris_openat(filedes, name, myflags, (mode_t) SOLARIS_ATTRMODE);
	if (attrfd >= 0) {
		retval = solaris_write_xattr(attrfd, value, size);
		close(attrfd);
	}
	return retval;
#else
	errno = ENOSYS;
	return -1;
#endif
}

/**************************************************************************
 helper functions for Solaris' EA support
****************************************************************************/
#ifdef HAVE_ATTROPEN
static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size)
{
	struct stat sbuf;

	if (fstat(attrfd, &sbuf) == -1) {
		errno = ENOATTR;
		return -1;
	}

	/* This is to return the current size of the named extended attribute */
	if (size == 0) {
		return sbuf.st_size;
	}

	/* check size and read xattr */
	if (sbuf.st_size > size) {
		errno = ERANGE;
		return -1;
	}

	return read(attrfd, value, sbuf.st_size);
}

static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size)
{
	ssize_t len = 0;
	DIR *dirp;
	struct dirent *de;
	int newfd = dup(attrdirfd);
	/* CAUTION: The originating file descriptor should not be used again
	   following the call to fdopendir().  For that reason we dup() the
	   file descriptor here to make things more clear. */
	dirp = fdopendir(newfd);

	while ((de = readdir(dirp))) {
		size_t listlen = strlen(de->d_name) + 1;
		if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) {
			/* we don't want "." and ".." here: */
			continue;
		}

		if (size == 0) {
			/* return the current size of the list of extended attribute names */
			len += listlen;
		} else {
			/* check size and copy entries + nul into list.
*/ if ((len + listlen) > size) { errno = ERANGE; len = -1; break; } else { strlcpy(list + len, de->d_name, listlen); len += listlen; } } } if (closedir(dirp) == -1) { return -1; } return len; } static int solaris_unlinkat(int attrdirfd, const char *name) { if (unlinkat(attrdirfd, name, 0) == -1) { if (errno == ENOENT) { errno = ENOATTR; } return -1; } return 0; } static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode) { int filedes = attropen(path, attrpath, oflag, mode); if (filedes == -1) { if (errno == EINVAL) { errno = ENOTSUP; } else { errno = ENOATTR; } } return filedes; } static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode) { int filedes = openat(fildes, path, oflag, mode); if (filedes == -1) { if (errno == EINVAL) { errno = ENOTSUP; } else { errno = ENOATTR; } } return filedes; } static int solaris_write_xattr(int attrfd, const char *value, size_t size) { if ((ftruncate(attrfd, 0) == 0) && (write(attrfd, value, size) == size)) { return 0; } else { return -1; } } #endif /*HAVE_ATTROPEN*/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.0.6.sigs0000660000000000000000000000050000000000000021577 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.0.7.sigs0000660000000000000000000000050000000000000021600 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.0.8.sigs0000660000000000000000000000050000000000000021601 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.0.sigs0000660000000000000000000000050000000000000021572 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 
tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.1.sigs0000660000000000000000000000050000000000000021573 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.10.sigs0000660000000000000000000000150400000000000021660 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.11.sigs0000660000000000000000000000150400000000000021661 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.12.sigs0000660000000000000000000000150400000000000021662 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject 
*(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.13.sigs0000660000000000000000000000150400000000000021663 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.14.sigs0000660000000000000000000000150400000000000021664 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.15.sigs0000660000000000000000000000150400000000000021665 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 
tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.16.sigs0000660000000000000000000000150400000000000021666 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.2.sigs0000660000000000000000000000050000000000000021574 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.3.sigs0000660000000000000000000000050000000000000021575 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.4.sigs0000660000000000000000000000050000000000000021576 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.5.sigs0000660000000000000000000000050000000000000021577 0ustar00rootroot00000000000000pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.6.sigs0000660000000000000000000000120700000000000021605 0ustar00rootroot00000000000000_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: 
void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.7.sigs0000660000000000000000000000120700000000000021606 0ustar00rootroot00000000000000_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.8.sigs0000660000000000000000000000120700000000000021607 0ustar00rootroot00000000000000_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.1.9.sigs0000660000000000000000000000150400000000000021610 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_CObject_FromTallocPtr: PyObject *(void *) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.2.0.sigs0000660000000000000000000000142100000000000021576 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.3.0.sigs0000660000000000000000000000147600000000000021611 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_name: const char *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.3.1.sigs0000660000000000000000000000147600000000000021612 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_name: const char *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void *(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/talloc/ABI/pytalloc-util-2.3.2.sigs0000660000000000000000000000147600000000000021613 0ustar00rootroot00000000000000_pytalloc_check_type: int (PyObject *, const char *) _pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *) _pytalloc_get_name: const char *(PyObject *) _pytalloc_get_ptr: void *(PyObject *) _pytalloc_get_type: void 
*(PyObject *, const char *) pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *) pytalloc_BaseObject_check: int (PyObject *) pytalloc_BaseObject_size: size_t (void) pytalloc_Check: int (PyObject *) pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *) pytalloc_GetBaseObjectType: PyTypeObject *(void) pytalloc_GetObjectType: PyTypeObject *(void) pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) pytalloc_steal: PyObject *(PyTypeObject *, void *) pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.2.sigs0000660000000000000000000000644300000000000020263 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.3.sigs0000660000000000000000000000644300000000000020264 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) 
talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.4.sigs0000660000000000000000000000644300000000000020265 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) 
talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.5.sigs0000660000000000000000000000644300000000000020266 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.6.sigs0000660000000000000000000000644300000000000020267 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) 
talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.7.sigs0000660000000000000000000000644300000000000020270 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) 
talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.0.8.sigs0000660000000000000000000000652300000000000020270 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) 
talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.0.sigs0000660000000000000000000000665300000000000020265 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) 
_talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.1.sigs0000660000000000000000000000665300000000000020266 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) 
talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.10.sigs0000660000000000000000000000671500000000000020345 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.11.sigs0000660000000000000000000000671500000000000020346 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void 
*, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.12.sigs0000660000000000000000000000671500000000000020347 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) 
_talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.13.sigs0000660000000000000000000000671500000000000020350 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) 
talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.14.sigs0000660000000000000000000000671500000000000020351 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.15.sigs0000660000000000000000000000671500000000000020352 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void 
*, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.16.sigs0000660000000000000000000000671500000000000020353 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) 
_talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.2.sigs0000660000000000000000000000665300000000000020267 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) 
talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.3.sigs0000660000000000000000000000665300000000000020270 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.4.sigs0000660000000000000000000000671500000000000020270 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const 
char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.5.sigs0000660000000000000000000000671500000000000020271 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const 
void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.6.sigs0000660000000000000000000000671500000000000020272 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) 
talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.7.sigs0000660000000000000000000000671500000000000020273 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.8.sigs0000660000000000000000000000671500000000000020274 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void 
*, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.1.9.sigs0000660000000000000000000000671500000000000020275 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) 
_talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/ABI/talloc-2.2.0.sigs0000660000000000000000000000671500000000000020265 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) 
talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/talloc/ABI/talloc-2.3.0.sigs0000660000000000000000000000671500000000000020266 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/talloc/ABI/talloc-2.3.1.sigs0000660000000000000000000000671500000000000020267 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) _talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void 
*, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/talloc/ABI/talloc-2.3.2.sigs0000660000000000000000000000671500000000000020270 0ustar00rootroot00000000000000_talloc: void *(const void *, size_t) _talloc_array: void *(const void *, size_t, unsigned int, const char *) _talloc_free: int (void *, const char *) _talloc_get_type_abort: void *(const void *, const char *, const char *) _talloc_memdup: void *(const void *, const void *, size_t, const char *) _talloc_move: void *(const void *, const void *) _talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) _talloc_realloc: void *(const void *, void *, size_t, const char *) _talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) _talloc_reference_loc: void *(const void *, const void *, const char *) 
_talloc_set_destructor: void (const void *, int (*)(void *)) _talloc_steal_loc: void *(const void *, const void *, const char *) _talloc_zero: void *(const void *, size_t, const char *) _talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) talloc_asprintf: char *(const void *, const char *, ...) talloc_asprintf_append: char *(char *, const char *, ...) talloc_asprintf_append_buffer: char *(char *, const char *, ...) talloc_autofree_context: void *(void) talloc_check_name: void *(const void *, const char *) talloc_disable_null_tracking: void (void) talloc_enable_leak_report: void (void) talloc_enable_leak_report_full: void (void) talloc_enable_null_tracking: void (void) talloc_enable_null_tracking_no_autofree: void (void) talloc_find_parent_byname: void *(const void *, const char *) talloc_free_children: void (void *) talloc_get_name: const char *(const void *) talloc_get_size: size_t (const void *) talloc_increase_ref_count: int (const void *) talloc_init: void *(const char *, ...) talloc_is_parent: int (const void *, const void *) talloc_named: void *(const void *, size_t, const char *, ...) talloc_named_const: void *(const void *, size_t, const char *) talloc_parent: void *(const void *) talloc_parent_name: const char *(const void *) talloc_pool: void *(const void *, size_t) talloc_realloc_fn: void *(const void *, void *, size_t) talloc_reference_count: size_t (const void *) talloc_reparent: void *(const void *, const void *, const void *) talloc_report: void (const void *, FILE *) talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) talloc_report_depth_file: void (const void *, int, int, FILE *) talloc_report_full: void (const void *, FILE *) talloc_set_abort_fn: void (void (*)(const char *)) talloc_set_log_fn: void (void (*)(const char *)) talloc_set_log_stderr: void (void) talloc_set_memlimit: int (const void *, size_t) talloc_set_name: const char *(const void *, const char *, ...) 
talloc_set_name_const: void (const void *, const char *) talloc_show_parents: void (const void *, FILE *) talloc_strdup: char *(const void *, const char *) talloc_strdup_append: char *(char *, const char *) talloc_strdup_append_buffer: char *(char *, const char *) talloc_strndup: char *(const void *, const char *, size_t) talloc_strndup_append: char *(char *, const char *, size_t) talloc_strndup_append_buffer: char *(char *, const char *, size_t) talloc_test_get_magic: int (void) talloc_total_blocks: size_t (const void *) talloc_total_size: size_t (const void *) talloc_unlink: int (const void *, void *) talloc_vasprintf: char *(const void *, const char *, va_list) talloc_vasprintf_append: char *(char *, const char *, va_list) talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) talloc_version_major: int (void) talloc_version_minor: int (void) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/Makefile0000660000000000000000000000165600000000000016527 0ustar00rootroot00000000000000# simple makefile wrapper to run waf WAF_BIN=`PATH=buildtools/bin:../../buildtools/bin:$$PATH which waf` WAF_BINARY=$(PYTHON) $(WAF_BIN) WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: touch .tmplock WAFLOCK=.tmplock $(WAF) dist distcheck: touch .tmplock WAFLOCK=.tmplock $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags pydoctor: $(WAF) pydoctor bin/%:: FORCE $(WAF) --targets=`basename $@` FORCE: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/NEWS0000660000000000000000000000034000000000000015553 0ustar00rootroot000000000000001.0.1 26 May 2007 BUGS * Set name of correctly when using talloc_append_string() (metze) LICENSE * Change license of files in lib/replace to LGPL (was GPL). (jelmer) 1.0.0 30 April 2007 Initial release. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/compat/talloc_compat1.c0000660000000000000000000000312500000000000021411 0ustar00rootroot00000000000000/* Samba trivial allocation library - compat functions Copyright (C) Stefan Metzmacher 2009 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ /* * This file contains only function to build a * compat talloc.so.1 library on top of talloc.so.2 */ #include "replace.h" #include "talloc.h" void *_talloc_reference(const void *context, const void *ptr); void *_talloc_reference(const void *context, const void *ptr) { return _talloc_reference_loc(context, ptr, "Called from talloc compat1 " "_talloc_reference"); } void *_talloc_steal(const void *new_ctx, const void *ptr); void *_talloc_steal(const void *new_ctx, const void *ptr) { return talloc_reparent(talloc_parent(ptr), new_ctx, ptr); } #undef talloc_free int talloc_free(void *ptr); int talloc_free(void *ptr) { return talloc_unlink(talloc_parent(ptr), ptr); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/compat/talloc_compat1.mk0000660000000000000000000000122600000000000021576 0ustar00rootroot00000000000000talloccompatdir := $(tallocdir)/compat TALLOC_COMPAT1_VERSION_MAJOR = 1 TALLOC_COMPAT1_OBJ = $(talloccompatdir)/talloc_compat1.o TALLOC_COMPAT1_SOLIB = libtalloc-compat1-$(TALLOC_VERSION).$(SHLIBEXT) TALLOC_COMPAT1_SONAME = libtalloc.$(SHLIBEXT).$(TALLOC_COMPAT1_VERSION_MAJOR) $(TALLOC_COMPAT1_SOLIB): $(TALLOC_COMPAT1_OBJ) $(TALLOC_SOLIB) $(SHLD) $(SHLD_FLAGS) -o $@ $(TALLOC_COMPAT1_OBJ) \ $(TALLOC_SOLIB) $(SONAMEFLAG)$(TALLOC_COMPAT1_SONAME) all:: $(TALLOC_COMPAT1_SOLIB) install:: ${INSTALLCMD} -d $(DESTDIR)$(libdir) ${INSTALLCMD} -m 755 $(TALLOC_COMPAT1_SOLIB) $(DESTDIR)$(libdir) clean:: rm -f $(TALLOC_COMPAT1_OBJ) $(TALLOC_COMPAT1_SOLIB) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/configure0000770000000000000000000000066000000000000016767 0ustar00rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` if [ -f $PREVPATH/../../buildtools/bin/waf ]; then WAF=../../buildtools/bin/waf elif [ -f $PREVPATH/buildtools/bin/waf ]; then WAF=./buildtools/bin/waf else echo "replace: Unable to find waf" exit 1 fi # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd . 
|| exit 1 $PYTHON $WAF configure "$@" || exit 1 cd $PREVPATH ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/context.png0000660000000000000000000001115300000000000020017 0ustar00rootroot00000000000000 [binary PNG image data omitted: context.png, the diagram referenced by "@image html context.png" in the talloc documentation, illustrating a talloc chunk (context meta data followed by the user-space pointer)]
XEá*¼P>lòOz½þÑ£G‰déÒ¥sæÌ¡gΜéää¤V«9!D§Ó9;;ÓŒŒŽŽúúúêt:‹¥×ëéÝ·žžžŽŽŽE‹y{{›œÇ¸Fóâæm „ôöövuu…††:;;[l õkלþóŸ#IJ–e7=€ì€m=;d@v`]àCÅ/µZ-“ɦ_¿ÆÆÆÌÑÙÙ/%%%OÈ`0š››gÏž½páBkh°R©ljjZ»v­««ë“s®^½ñ¶>z2Å7}ÅÅÅ"‘(%%%44Ôz688XRR"“Éöïßïëë‹HÈL7ÍAy²v¤9(@vÀŽ4åÈØ‘æ <Ùqi®¤¤ä?ÿù­kʰ#ÍAy²v¤9(@vМiÊ4jŸÊ4åÙAsP4åÙAsP4åÙAsP€ì4åÈšP€ì 9åÈšP€ì 9åÈšƒò€ì 9(Èšƒò°OÙAsP€ì 9åÈšP€ì 9åÈšP€ì 9åÈšP€ì 9å;“Ý¥K—êëëwìØŽpš¾¾¾?þØÑÑQ p8\`YvåååõõõóæÍ³‰¨T*ggç'dP(}ô"m?ŒÅÄĬ[·îŽÉG©T¾ôÒKo½õÂ4ù8XLÝ¿ÿ´Y>|a¶7~ùË_9rÄ &‘HÊÊÊ )K€ì²È ;¦ƒÁðÃ?ˆÅb&E©Tööö*•ÊÖÖV:E¥Rµ´´ç‘ËåÌKN'‰èã¡¡!©Tª×ë…B¡L&còkµZ¡PØÞÞn0pÍ!;&›3gΔ””<|ø022rëÖ­ ¥¥¥\.7///222**ŠRVVvôèQ±X¼ÿþ;v¨Tª²²² œ;wŽÒÔÔ´bÅŠŒŒ BÈñãǽ½½OŸ>½{÷n@PTTDéììܹs§H$úâ‹/’““qÙ­‡qæ“Ëåccc^^^ôäÖÓÓãïïOObmmm...l6›™ýþýïs¹\wwwzòòððxôèQHH!D*•²Ùl—û÷ï¹¹¹ÑÇÆÆ~üñÇ%K–¸¸¸×;wîÜ,^¼ØÑÑQ«ÕÒ¥˜<æ5`BOOÏÁƒ%‰ƒƒCooïçŸ~óæMBH}}}ccc]]ÝÝ»w[ZZ îÝ»Çb±6nÜšŸŸŸ}þüyú$áááo¼ñÆ7!™™™¥¥¥b±¸¼¼œÅbݼysýúõ111ÕÕÕ^^^111111õõõ¸ò6vg÷¸ÉÍ|£(êØ±c'NœèééÙ´iSmmí… L&Ï÷ßßÛÛ;??÷îÝ loo×jµ™™™EEEGŽ U(L½ !!aß¾}K–,©­­MLLÌÌÌ\¾|¹T*µX#‚ ÌQ(cccôbS¡P0#urrŠŽŽöððذaCMMMPP‹Å"„°Ùl>Ÿÿõ×_Óy˜ó˜GFFÒù#""¸\îƒx<ÞÙ³gò³³½½½qåmìÎî7¿ùÅÉíüùó&“صk×.]ºtëÖ-BˆX,~çwîܹséÒ%fò$„dee•••yzz¾ÿþûE­ZµêܹsŒ‹‹[¿~=EQ!!!ÿøÇ?¶lÙB×ëààpéÒ%Š¢üüü*++«ªª(Šò÷÷ÿûßÿž’’b±FĘ”•••’’ÂãñäryAAóÖŒ3˜e=ƒÒxzzΚ5‹>fvßôzýãªÐh4kÖ¬™7o^{{{MMMeeeaaa__ßÌ™3qýmikqrãñxï¾ûnccclllRR!ä›o¾ÉÍÍuqq¡—®&“'s†eË–BX,VTT”D"qrr (((P©Tl6{``À<çªU«˜ãŸþô§ô,m±FÌñññÙ¶m[jjªq¢Á``DUPP ‹éíšÖÖÖÄÄDBˆ«««P(¤7m.^¼h<Ɔ††èƒ«W¯&''Ï›7¯¸¸8--mÏž={öì –ËåÉÎâän2‰I¥Rÿììl“²Ìäi¡:®±±1--­¾¾ÞËËë»ï¾³øËøÝÌñãjÀ„ŠŠŠöööÏ>ûŒÅb¹¹¹ ‚ÑÑѦ¦&ww÷•+WÆÇÇoÚ´)==Ïççæævwws¹Ü×_’œœÜÛÛ»páBWQQqåÊ•˜˜Bˆ@ hhh˜?~HHH^^!¤­­íøÃ–-[š››ß|óMÚ›À–dgqr3ŸÄÖ®]›™™Ù××GïV 6›m‰íÚµëúõëk׮ݾ};‡Ã öññ1ž< !þóŸE"QyyùâÅ‹ƒ‚‚!ׯ_OII9yòä¶mÛbcc}}}¯\¹²yóæÛ·o777+Š7vttÐÇ«W¯&„ÔÔÔðx<óßxã ĘpöìYOOÏíÛ·3ûqÙÙÙŒ¡þÿ‚ŸŸŸŸŸŸIñY³fÑûwŽŽŽ&ùçÏŸo²þÆ5·:(3>ÿüó––óôááá®®.ƒÁ Ñh´Z­J¥Òét<¸uë–T*5ÎÙÓÓóÏþS©TR¥R©´Z­V«¥_R¥P(t:Z­Öh4Ei4ú¥L&“H$LN§T*µZ­F£Q«ÕÆÇE©Õj­VK›Ôh¡C‡(`OŒŽŽæææš$þío‹ŽŽÖétôࡲ=cE¡¡¡OUd`` ??1šžbÏÎâäfqóñññññaž0˜Lžôçã˜M7úl‡Ù×cŠ<ágM6kÀœøøx‹•““ãåå¥V«½ªx–[ÅÀÀÀÆÆÆ—_~Wxú,c°uø|>ŸÏ^gKMM5y° ¬ìå ;€ì²ëÂòН¾úêþýûÓ£‡jµa¶7Z[[+++­°a£££ˆÎTaá_)J¥ÒŽŽŽiÓCOOÏ€€DÚ~0 ßÿýÍ›7¿ýöÛ}ûöó5·nÝŠ‹‹{A­ª¯¯‰D K—.ÅwȬEvØ.z½¾¼¼üÎ;‰‰‰ã,EQTjjjQQó3'/‚Ó§O+Š}ûöáן ;&Us4gÏžår¹7nœ„vByS 9BˆH$:qâć~8™m¦•§T*322 <È€«¹I[ÀByS¦¹É_ÀByS ¹©ZÀByãÒÜÝ»wžQsS¾€…ò ;^ìÝœU-`‡D"9uê”ÙhîY±ª,”ÙhîùkÎj°PdìTsÏkoΜO>ù$00Ð:°OP^YYÙðððÞ½{¡<Èànn\ Ø>úÈøŸdÛ–òJKKGFF <È@sÓg åAvš³£,”Ùé ¹ŠŠŠÖÖÖ×^{íEkÎÖ°Pdl[s¿úÕ¯x<Þ$Ô8=°Pd 9;ZÀBy€æìhû?•÷ûßÿÿ9²ö¢¹i¿€…ò ;Íý—3gÎ,Z´hÆ vxý¡<ÈØ…æìm åAvÀ5g· Ø'+ott4==Ý>•Ùç¬9¡P¸uëÖ©Õ°PdìBs„Gøàëoç‡~¨V«'PP«Õ>|Øúûhœ|8N^^žmµY(þë_ÿ²(;6" ° ;dÓìÙÿò,ü6Áàà Á`ðôôd±X· ÑÖÖ¦ÓéÜÝÝýýý™D™LÖÕÕÅf³9ÎÒ¥Kqgþ7E544ìÚµËx$M3úûû_}õÕÅ‹ûøø¬[·®¿¿q·!T*UzzzHHHkk«ñ¸½~ýúoû[¥R‰e,}}}÷ïß?räˆÁ`˜®}¬««KJJ’H$"‘h``àèÑ£ˆ» ñÊ+¯ðx¼ÐÐP>Ÿ/“ÉèĹsçîØ±cÍš5+W®„ìlƒÁðÃ?ˆÅb&E©Tööö*•JffS©T---Æy!r¹œIÑét"‘ˆ>’J¥z½^(2c…ÁÇÇ'==}æÌ™Ößljuâë뛚šÊb±|}}zzz0Ìl(|„‡SZZÊáp’’’ôz=“ÈápÆßlÈκ8sæLIIÉÇ###·nÝšPZZÊåróòò"##£¢¢!eeeG‹Åû÷ïß±c‡J¥¢,XpîÜ9BHSSÓŠ+222!Ç÷öö>}úôîÝ»A@@@QQ‘-öñY:¸~ýz毢¥¥åÕW_ÅH³¡ðÑÌž=»ªªª©©iâФÀqèÐ!“”îîîÙ³gkµZŠ¢N:Åãñèô]»v………Éd²ºººæææƒÁ@Q”^¯_¾|ùÿøG:ÛæÍ› éã‚‚‚¸¸8úø'?ùIFF]äÆl6»­­Í¤êÎÎÎÇ †‚‚‚þþþ tphhèèѣϱÏÒAš{÷îmÞ¼™®ýɱ˜è6?mŠÑg _VVVoo/EQ—/_f±XEÉd²˜‡ø/ù‹ÅîàÎΊP(cccô¼B¡ ¥Óœœ¢££=<<6lØPSSD?Od³Ù|>ÿ믿f²1§29ŽŒŒ¤‹DDDp¹ÜØbŸ±ƒCCCüôÓO¬åC±±±ÅÅÅO›b£Cô¹ŒÏØØØ÷Þ{/55õîÝ»OÛx|ôÄŠ ÊÊÊJIIáñxr¹Üø¿Î˜1ƒÙøJ¥Lº§§§ñ·V™‡ ̾†9fÍš56ÚÇ wP£Ñ¼õÖ[§Nš?¾õDü½÷Þ3ù¸ÏxRlwˆ>—ñyøðáï¿ÿžÏç×ÖÖBv6ŒÏ¶mÛRSSMöƒ™QUPP ‹½¼¼!­­­‰‰‰ô[®®®B¡¢Õj/^¼hü ü¡¡!úàêÕ«ÉÉÉóæÍ3©—|z½þ©v|'¹î ^¯ÏÈÈHKKsssëêêR«Õ¾¾¾...Sî°°° ¤Øè}–ñ©Ñh4 }Ìb±JKK×­[—””ôTÛ¯uQQQÑÞÞþÙgŸ±X,777@0::ÚÔÔäîî¾råÊøøøM›6¥§§óùüÜÜÜîîn.—ûúë¯Óe’““{{{.\Èãñ***®\¹C óçÏ 1ÿj÷—_~yþüyBÈ;3˜˜ø¢ïû&ÜÇ wðÀÅÅÅÆ‹ÁÛ·o¯^½ãÍ&ÂWUUuõêU¹\ž™™¹hÑ"BÈÌ™3«ªª~ö³Ÿ=Ý6áAõìþöõõ¥¥¥ýõ¯½uëVuuõÎ;_yå•J¥ÕjµZ­R©drvuu555Éd2“3 wuu 
F£ÕjU*EQ«V­ª¬¬‹ÅôKs”J¥Z­ÖëõjµZ­V¿ÐÏØÇ‰uP¡P¨T*NG÷Q.—ët:kx@aoCtÂãS§Ó©Õj“'KR©´££cü(pggEœ={ÖÓÓsûöíÌfGvv6³•k¼­îççgqgÖ¬Yôþˆ£££I‘'ìU9;;›l»Xm'ÖAãëŒ3&¡›¢Ï1|ôø4ß`™3gΜ9sÆßx<µ"–-[vçÎzûL£Ñœ9s&''çÙOKÏ¢Ó¸VÕA Q« î쬈øøx‹•““ãåå¥V«APPгOÅ!!!/¿üòô룵uCÔjÃÙY|>ŸÏç?Ǧ¦¦š<8›f}´ÂbˆZgø°ŒØ²€éöì¦ N÷¸´±*Æÿãˆæ¨T*›è#æZD£ÑØDøŒ쬎•+W–””X;ÝÜÜ&PÐÙÙÙÕÕÕ&ú¸bÅ Hs‚‚‚l"|&¼öÚkÓñO²vöì@vÙd@vÙd€ì²È ;€ì`rù´î¯¬×ûIEND®B`‚././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/mainpage.dox0000660000000000000000000001073300000000000020125 0ustar00rootroot00000000000000/** * @mainpage * * talloc is a hierarchical, reference counted memory pool system with * destructors. It is the core memory allocator used in Samba. * * @section talloc_download Download * * You can download the latest releases of talloc from the * talloc directory * on the samba public source archive. * * @section main-tutorial Tutorial * * You should start by reading @subpage libtalloc_tutorial, then reading the documentation of * the interesting functions as you go. * @section talloc_bugs Discussion and bug reports * * talloc does not currently have its own mailing list or bug tracking system. * For now, please use the * samba-technical * mailing list, and the * Samba bugzilla * bug tracking system. * * @section talloc_devel Development * You can download the latest code either via git or rsync. * * To fetch via git see the following guide: * * Using Git for Samba Development * * Once you have cloned the tree switch to the master branch and cd into the * lib/tevent directory. * * To fetch via rsync use this command: * * rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/talloc . * * @section talloc_preample Preamble * * talloc is a hierarchical, reference counted memory pool system with * destructors. * * Perhaps the biggest difference from other memory pool systems is that there * is no distinction between a "talloc context" and a "talloc pointer". Any * pointer returned from talloc() is itself a valid talloc context. This means * you can do this: * * @code * struct foo *X = talloc(mem_ctx, struct foo); * X->name = talloc_strdup(X, "foo"); * @endcode * * The pointer X->name would be a "child" of the talloc context "X" which is * itself a child of mem_ctx. So if you do talloc_free(mem_ctx) then it is all * destroyed, whereas if you do talloc_free(X) then just X and X->name are * destroyed, and if you do talloc_free(X->name) then just the name element of * X is destroyed. * * If you think about this, then what this effectively gives you is an n-ary * tree, where you can free any part of the tree with talloc_free(). * * If you find this confusing, then run the testsuite to watch talloc in * action. You may also like to add your own tests to testsuite.c to clarify * how some particular situation is handled. * * @section talloc_performance Performance * * All the additional features of talloc() over malloc() do come at a price. We * have a simple performance test in Samba4 that measures talloc() versus * malloc() performance, and it seems that talloc() is about 4% slower than * malloc() on my x86 Debian Linux box. For Samba, the great reduction in code * complexity that we get by using talloc makes this worthwhile, especially as * the total overhead of talloc/malloc in Samba is already quite small. * * @section talloc_named Named blocks * * Every talloc chunk has a name that can be used as a dynamic type-checking * system. 
If for some reason like a callback function you had to cast a * "struct foo *" to a "void *" variable, later you can safely reassign the * "void *" pointer to a "struct foo *" by using the talloc_get_type() or * talloc_get_type_abort() macros. * * @code * struct foo *X = talloc_get_type_abort(ptr, struct foo); * @endcode * * This will abort if "ptr" does not contain a pointer that has been created * with talloc(mem_ctx, struct foo). * * @section talloc_threading Multi-threading * * talloc itself does not deal with threads. It is thread-safe (assuming the * underlying "malloc" is), as long as each thread uses different memory * contexts. * * If two threads uses the same context then they need to synchronize in order * to be safe. In particular: * * - when using talloc_enable_leak_report(), giving directly NULL as a parent * context implicitly refers to a hidden "null context" global variable, so * this should not be used in a multi-threaded environment without proper * synchronization. In threaded code turn off null tracking using * talloc_disable_null_tracking(). * - the context returned by talloc_autofree_context() is also global so * shouldn't be used by several threads simultaneously without * synchronization. * */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/stealing.png0000660000000000000000000001552200000000000020145 0ustar00rootroot00000000000000‰PNG  IHDR±ž3?£±sRGB®Îé pHYs  šœtIMEÜ 1r·¯ÃtEXtCommentCreated with GIMPW¿IDATxÚíyXSWþÆoB$ˆa‘¨ 2¸Œ”·¢c nTÁAáí¨ƒ:—VpTÔ‚âÂT;µ£ŽâÒªàŠJÇÇE¨e!@ âŽÂ6lYÉýý‘yòC B–›ð~þð1'÷ž“œóý¾Ü÷Üsnh$I‚ :º ‰Mh"@š½€AÙOÖÐÐPZZúúõkùËþýû{xx¸¸¸0  MMM¥¥¥/_¾”/kc2™Çïׯ:Ç¡Qm}"‡ÃùñÇ[ZZlmm===]\\h4AÍÍÍ\.·¬¬L"‘x{{/\¸ÐÂÂãôů¿þzúô醆‹5zôh‚ Z[[KKKKKKE"ј1cBCCˆî‚&ªÃ­[·®^½úÛßþ688¸ë0zòäIZZš™™Ytt´µµ5Fè’G¥¦¦Ž5*$$ÄÖÖ¶‹#‹ŠŠRSSe2YTT›ÍF×AU¥¾¾~ûöí>>>áááò«BU¨¬¬LLLœ6mÚÂ… 1@´´´ìرÃÕÕuÅŠtºªsñuuu»ví3f̲eËЇÐÄî),,üÇ?þ±mÛ¶Áƒ«qú™3g¸\n\\œÜ¹ %ÊËË÷ìÙ³yófggg5N¿zõêƒLMMÑ™ÐÄNyøðá•+WvïÞÝEËÏÏONN>pàfµ–ÈÏÏÿ׿þ•””Äd2Õ®¤¬¬,))éÀæææèRh¢8ÎÑ£G÷ï߯º_äääo¿ý–:=>lØ0mÔ\[[›œœŒØÕ/_¾LLLüþûï{¿æáÅ‹‰‰‰‡VÝzS–°°0õ.™©ᤞhjjŠŒŒ”H$šªðÎ;GŽ!)Cll¬ÁÕ >D$ýùÏšª077wïÞ½FÐ3ÆázûcµsçÎÍ›7kp±áŒ3x<Þ¯¿þŠë AöíÛ·nÝ:333MUèíímjjúŸÿü}KMô£‰yyyÆ srrÒlµ_}õÕßÿþw *Ðåååt:}Ô¨Qš­6** ³ÐÄ÷8vìXDD„Æ«5337nÜãÇ1®@#:thõêÕ¯ÖÄÄdæÌ™ÿþ÷¿ÑÃÐD‚ ˆŠŠŠ#FhiEÂÒ¥K/]º„q½§±±qÀ€ZÚ…tëÖ-t24‘ âìÙ³aaaZªÜÔÔÔÄÄD(R­£[[[/\¸°nÝ:Äœ¡páÂím Óé¶¶¶µµµFÓ]FázÐÄÊÊJ–ÉdUUU=ªêÔ©YYYTëh’$Åbñ 5†BqqñèÑ£µ¨~~~Æt©h4N×}Çõè^óÒ¥K<Ø£&&MšôË/¿P­£-,,&Ož ¡1¤ÜèÉB5ÕËË«  ÀhºËh"\ךøîÝ»®o7Ëd²ÒÒR>Ÿ¯¸ ·±±!B$•””(V˜WWW·´´(­ÁÚÚº¡¡Ê^SSSPP ‹%ÍÍÍùùù@Q"*++AQQäI÷‚®w›t¨ªÇ*ƒÁhkk3ÊÞ3è×õ³«ªªºØ×üàÁƒ+W®˜››gddDEEÙÙÙåææ¾|ù233sÖ¬Y_ýuLLÌÚµkóòòöîÝûÍ7ßtö¸°ÞoŒÑÇŽûé§ŸŠŠŠ¤Ré;wØlvLLŒMNN—Ë-((077?þü_|úèÑ£/^Ó¬“¡Àãñºx’Mºÿþ¼¼¼¨¨(Ub•ÊÚw#\ÇkÄ333¯]»¦ô-‡3kÖ,™LF’dEEÅåË—I’ øî»ïçš™™=~ü8""¢ë}TØéñágxñâ…<ä3/&LˆŽŽ …÷îÝ#IR&“ýæ7¿ÉÈȼdÉ’>ú¨¾¾þöíÛØÇ¢{ŠŠŠNž<©^ ª«=”ÆẾNd2™Ý>þ¼§§§ü/§££c```‡|}}׬Y3sæÌÒÒÒ®÷PíA¹íùøã ‚èׯ߂ =zÄd2“’’„B!NWx1&“9kÖ,‹õÉ'ŸàªM÷0™ÌöF¯Gªz¬R9Pûl„ëz>ÑÆÆ¦®®®³©Æêêê®Og³ÙC† 9zô¨!ÆJ‡hkk7n\VVV@@À矾eËWWW™L¦8•Ò#½ TCÕ¾áºÖD—W¯^)}kúôééééïÞ½{ïóÑéŠùéóçÏ[YY¥¥¥%&&>|ø°³&„B¡w§j‘H$ŸZ&B"‘deeýå/IOO÷ôôd³Ù|>ŸËå*¢J&“µ cº¸S×m ª«F†qD¸®½3ƒÁH$Jß ËÌÌôññY¶lYsssHHÈ”)Süýýããã Î;wéÒ%¶}ûöE‹ýðÃJ¯ºóòòÆOÁ¾njjrssóööþä“O,--9booïççàè蘑‘1{öì§OŸfgg0ÀËËkÁ‚P(*\õ¨¨)))/^üñǻղ²2777cê1#‰pÝOaÆÅÅ566vönEEÅÇ+++%555B¡°ý12™L$‰Åb¥5ÄÇÇ×ÖÖRpZ H¥Òºººêêêöåõõõ555$I …B‰D"•Jåÿ‘H$J§çqE7ìÛ·¯¢¢¢7Úu¬~÷ÝwåååÆtÅ8"\ûX-ZtîܹÎÞuttœ:ujûõ:¶¶¶nL£ÑLMM•>U›$I§X)F)ÌÌÌLLL¬­­ Ô¾œÅbÉêˆÉd2 ù 5'úaaa?üðCoµëX-++1b„1õ˜qD¸4q̘1Ú[¾ûömä3è=NNNïÞ½ÓҲ꼼¼>úLAôó¬0???m<(‰$ÉË—/Ï›7ã 4åiΞ=«šOœ8±xñbô04ñÌŸ??==]ãO¯9}útpp°Qî zaÊ”)¹¹¹ß*zýúu___ü¤4ñ=Ö¯_¿mÛ6 
Vøüùó‚‚‚3f`PÙ¸qã–-[4XaUUUFF~”œ²0ôÕ°««ë”)SŽ?¾|ùòÞ×ÖØØ¸wïÞýû÷S§gkjjÒÒÒ´Qsss3Wg888,Z´èÀk×®í}mB¡pÇŽ‰‰‰FÐ3Æázþ}ç”””þýû‡††öR7mÚ´cÇ;;;êDLii©–†ÖÚÚÚÈîWRŸ‹/ÖÖÖ®\¹²7•‚˜˜˜72Äú¤Gžššb®÷UN§NJHHJ¥ê^\\ü§?ý‰Ïçc=Ð*W®\Ù´i“H$Rïô/^,_¾üíÛ·}³÷ hQ-Cïm–,Y’——¹~ýzÕO”J¥ÉÉÉUUU‡ÂÖ` mæÏŸïîîþÅ_¬^½ÚËË«G—'Ož,..>xð`×ÏdT@ÏÞ¹½­8xð`}}ýªU«\\\º>¸­­íÂ… ™™™Ÿþùĉ1Š@gH$’Ç¿~ý:""¢Û?á2™,##ã§Ÿ~Z¸pa¿û·iÓ&C™E¥Š&*fSRRÊÊʆ âëë;zôh‹¥ÐÁ—/_æææÊªtÁ‚òG {Z[[O:ÅápØl¶¯¯ïرc[§H’|õêU^^^vv¶T*;w.6@5@eeevvvIIIcc£ü‰Ç4ÍÅÅeܸq&LÀÂ.@ø|~vvvqqq}}½|a,I’ÎÎÎcÇŽõññùp«4š¨¶oß‹¿@u£ÐD€&4 ‰Mh"@šÐD€&4 ‰Mh"@šÐD€&4 ‰M#†.º§¸¸øÆ#GŽœ?>zà:2™¬ªªJõãMMM¯_¿ž——‡®ÐD`„,]ºôàÁƒªïêêêââ‚~ÐD` H$‡S^^.“Éä%­­­666ФRiaaaSSSû³š››óóó:PýÌ'ÖÔÔäçç«x°@ ¸wÒGµ´´œ4iÆU{¼yófÆ Ë–-+..ÎÍÍ={öìµk×rss_¾|™™™ùÍ7ߤ¥¥1Œ1cƬ\¹2!!ÁÏÏO"‘ÄÄÄØØØäääp¹Ü‚‚ssóÞøô·oß655 8ÐÉɉF£aPôBIIIEE…Š‹Åâ[·n©xððáÃGŽÙ·®srr8ŽŠûûû«(ˆA=zÁªUÎ;Çf³çÎûå—_®ZµŠ ˆÏ>ûlìØ±Ë—/¿|ùriié•+W6lØ0kÖ¬Õ«WÇÄÄÈU,00pË–-W¯^555½ÿ~o>@qqñ‹/jjjÊËËËÊÊ0"ú¢G¹6gÎlkkKIIés׉Aüîw¿ûøã5^ííÛ·¬ZeêÔ©qqqYYYaaaÞ½wï^ccc|||ÿþýX,AL&ÓÙÙ9))I(Òét>Ÿß›‹Äºº:Å˪ª*ggg333Œ‹î155õóóÓxµR©ôáÇ}Q2yòäòòòëׯ§¥¥íß¿¿ªªÊÒÒRñn]]ݰaönÝÚþ”¬¬¬U«Vݹs‡Ífçææ*f!Õ@"‘$Ù¾äÝ»w#FŒÀ¸ÃöÎÀp9räÈàÁƒW¬XqãÆ ‡––‚ ètºü?“&MÊÊÊR¬Ë‘Ë_zzº§§'›Íæóù\.W.j2™L qd2™í%˜ ùÜ"Æà:è‡ÒÒÒèèè9sæ<}ú4**ŠÍfáïï_XXxäÈ‘ÌÌÌI“&…††š˜˜¸¹¹-_¾ÜÏÏ/(((888 ÀÑÑ1##ƒÉdfgg³X¬™3göt eðàÁÏž=S¼$I’ÃáxyyÁAh"Ðûöí{þüyMMMdd¤µµµ¼0:::<<ÜÒÒ’Édž8q¢²²òíÛ·cÇŽ•딿¿EEE[[›­­íâÅ‹MLLÄbqHHI’j\*:::VVVÊ/KåˆÅâ§OŸŽ?žÉdb€€Qyçúúúµk×^¼xCYäW“'OV¢[[[…$988L˜0¡ý…‹Å²µµ•›_ƒannÎ`0úõ맆ŠÑh4ww÷Kp„Ba^^L4²Ø¨4Q$ÅÄÄÌž=ÛÊÊ ºÀÊÊÊÕÕµC¡X,ÎÏϯ©©Aÿ ‹Ä;ŸýôÓ±cÇîÛ·/00ðÃÂ˃ÓÚÚŠNÐc+Má­[·vVïÜ ×¯_wuu•ß[¤ÓéAAA7oÞTZˆ@Jé0¨Â]”C»÷>í·µÚÙÙYYY)-Dð4hШQ£ÐTKá.Êá•Ð~³×´iÓ’’’x<ž|›DQQÑ¢E‹>,Dð©ÇÀétÞÙI’dcc£X,îPÞ¯_?77·Aƒaˆõ›ÅJS¸‹rhbGnÞ¼ùèÑ#ssóÌÌÌiÓ¦ùûûGFFÅÇÇWTT¸¸¸üñ$Bi!Pƒ¡C‡ôç/++ûPÍÍÍ›g€~³¸³î¬šØ‘iÓ¦åçç+6{Ñh´ýû÷WTT¼yó&00PþÈ)‚ ”‚¾Ç«¬¬ìPhmmíéé©ú£6V³¸³î¬šØ¥{¼œœœœœœT)}©TZ^^Þ¡ÐÆÆfôèÑ=`|˜Åe+•³1 Œ7oÞH$’ö%VVVDM}×8¿çt OOO"€&‚¾ˆH$‰DíK\\\ðˆ0M}SSÓö—„t:]¾¤M¡·{,‡¾víZû’¶¶¶=ËDéñ\.ƒjÄÐh´!C†¼~ýZþrĈ¸Ñ¬/ZZZbccUÉÊe±L&Óó…?I¢££ëêê>,ß¶m›@ ø°üoû[^^ úõõõoß¾ýïÿ‹® Ú¸DEE)}+66ViyDD„P(¤Ô· ŠwNKKóõõíÑÕ5kÖ8p„ꃰX,'''ìì¤Û·o‹‹ëÑ)7nŒÇ|bGx<ÞÝ»w{ºÅ‡Á`lÚ´iÏž=ˆEôÎ… |||zº«ÒÝÝÝÎÎîÁƒÐÄ÷HHHøúë¯Õ8ÑÃÃÃÜÜüñãLjHôŸÏ¿uëVxx¸ç®Y³æÄ‰šøÿ®yÚ´ivvvê¾~ýúï¿ÿ=²sçNõ.k‚ Óé7nܹs'4Q}×Ü8h Î5SÖAëYãââÔþó €Þ©®®VÛ5SÓAëSSSSýüüÔvÍpÐè]»võþ²†RZošÈãñîÝ»§©ÇIÂA`ˆ®™‚Zoš¨×  €¡»fª9hýh¢]34íš©æ õ ‰šuÍpкk¦”ƒÖƒ&jÜ5ÃA`è®™:Zך¨%×  €A»fê8hj¢ö\34†îš)â uª‰ZuÍpкk¦‚ƒÖ&¦¦¦Î;W«®ƒvÍTpÐ:ÒD¹k ÔÙƒƒÀ]³Þ´Ž4Qg®CwÍúuкÐD»f8h Ú5ë×Ak]uïšá 0t׬G­uMÔ‹k†ƒÀÐ]³¾´v5Q®ƒvÍúrÐZÔDýºf8h Ý5ëÅAkQõîšá 0t׬{­-M¤ˆk†ƒÀ ]³î´V4‘ÇãÝ¿Ÿ ®µ]óĉ©àš;8èAƒiÕAkEãââvìØAÁaöðð°°°€ƒ@×FÁÏ­U­yM¤ knϺuëà 0,׬K­aM¤¦kþÐAïÞ½q€¹ætff¦hâÖ­[©éš;8hKKK8h”ºæ›7oRÓ5wpÐÇŽkii¡´&¦¦¦PÖ5ÃA Škާþç¤Óéýë_©«‰ÔwÍpкkÖƒ¦‘$©xQVVvéÒ%õ*zöì›Í0`€üåŠ+lmm»=+''çþýûÝÖõõõ555$I …B‰D"•JI’"‘¨­­M$‰D¢§?~¼¤¤„T]»v544tÛbkk«P(”J¥ò[ZZäCAll,©JTúy<ÞÏ?ÿÜÚÚªz=èÕ#¼ë,ÖxNé>‹»NáÎêé~š“Åb)f1…Šçjã1‚¶Ø¿Å»¦¦¦oTéw´··×ø¬z¡of±z)Œg@4 ‰MUy½ýéÓ§Õþ9SµµµŸ~ú©*G>Samba and SSSD developers over the years. These will help you to write code which is better, easier to debug and with as few (hopefully none) memory leaks as possible. @section bp-hierarchy Keep the context hierarchy steady The talloc is a hierarchy memory allocator. The hierarchy nature is what makes the programming more error proof. It makes the memory easier to manage and to free. Therefore, the first thing we should have on our mind is: always project your data structures into the talloc context hierarchy. That means if we have a structure, we should always use it as a parent context for its elements. This way we will not encounter any troubles when freeing the structure or when changing its parent. The same rule applies for arrays. 
For example, the structure user from section @ref context-hierarchy should be created with the context hierarchy illustrated on the next image. @image html context_tree.png @section bp-tmpctx Every function should use its own context It is a good practice to create a temporary talloc context at the function beginning and free the context just before the return statement. All the data must be allocated on this context or on its children. This ensures that no memory leaks are created as long as we do not forget to free the temporary context. This pattern applies to both situations - when a function does not return any dynamically allocated value and when it does. However, it needs a little extension for the latter case. @subsection bp-tmpctx-1 Functions that do not return any dynamically allocated value If the function does not return any value created on the heap, we will just obey the aforementioned pattern. @code int bar() { int ret; TALLOC_CTX *tmp_ctx = talloc_new(NULL); if (tmp_ctx == NULL) { ret = ENOMEM; goto done; } /* allocate data on tmp_ctx or on its descendants */ ret = EOK; done: talloc_free(tmp_ctx); return ret; } @endcode @subsection bp-tmpctx-2 Functions returning dynamically allocated values If our function returns any dynamically allocated data, its first parameter should always be the destination talloc context. This context serves as a parent for the output values. But again, we will create the output values as the descendants of the temporary context. If everything goes well, we will change the parent of the output values from the temporary to the destination talloc context. This pattern ensures that if an error occurs (e.g. I/O error or insufficient amount of the memory), all allocated data is freed and no garbage appears on the destination context. @code int struct_foo_init(TALLOC_CTX *mem_ctx, struct foo **_foo) { int ret; struct foo *foo = NULL; TALLOC_CTX *tmp_ctx = talloc_new(NULL); if (tmp_ctx == NULL) { ret = ENOMEM; goto done; } foo = talloc_zero(tmp_ctx, struct foo); /* ... */ *_foo = talloc_steal(mem_ctx, foo); ret = EOK; done: talloc_free(tmp_ctx); return ret; } @endcode @section bp-null Allocate temporary contexts on NULL As it can be seen on the previous listing, instead of allocating the temporary context directly on mem_ctx, we created a new top level context using NULL as the parameter for talloc_new() function. Take a look at the following example: @code char *create_user_filter(TALLOC_CTX *mem_ctx, uid_t uid, const char *username) { char *filter = NULL; char *sanitized_username = NULL; /* tmp_ctx is a child of mem_ctx */ TALLOC_CTX *tmp_ctx = talloc_new(mem_ctx); if (tmp_ctx == NULL) { return NULL; } sanitized_username = sanitize_string(tmp_ctx, username); if (sanitized_username == NULL) { talloc_free(tmp_ctx); return NULL; } filter = talloc_aprintf(tmp_ctx,"(|(uid=%llu)(uname=%s))", uid, sanitized_username); if (filter == NULL) { return NULL; /* tmp_ctx is not freed */ (*@\label{lst:tmp-ctx-3:leak}@*) } /* filter becomes a child of mem_ctx */ filter = talloc_steal(mem_ctx, filter); talloc_free(tmp_ctx); return filter; } @endcode We forgot to free tmp_ctx before the return statement in the filter == NULL condition. However, it is created as a child of mem_ctx context and as such it will be freed as soon as the mem_ctx is freed. Therefore, no detectable memory leak is created. On the other hand, we do not have any way to access the allocated data and for all we know mem_ctx may exist for the lifetime of our application. 
For these reasons this should be considered as a memory leak. How can we detect if it is unreferenced but still attached to its parent context? The only way is to notice the mistake in the source code. But if we create the temporary context as a top level context, it will not be freed and memory diagnostic tools (e.g. valgrind) are able to do their job. @section bp-pool Temporary contexts and the talloc pool If we want to take the advantage of the talloc pool but also keep to the pattern introduced in the previous section, we are unable to do it directly. The best thing to do is to create a conditional build where we can decide how do we want to create the temporary context. For example, we can create the following macros: @code #ifdef USE_POOL_CONTEXT #define CREATE_POOL_CTX(ctx, size) talloc_pool(ctx, size) #define CREATE_TMP_CTX(ctx) talloc_new(ctx) #else #define CREATE_POOL_CTX(ctx, size) talloc_new(ctx) #define CREATE_TMP_CTX(ctx) talloc_new(NULL) #endif @endcode Now if our application is under development, we will build it with macro USE_POOL_CONTEXT undefined. This way, we can use memory diagnostic utilities to detect memory leaks. The release version will be compiled with the macro defined. This will enable pool contexts and therefore reduce the malloc() calls, which will end up in a little bit faster processing. @code int struct_foo_init(TALLOC_CTX *mem_ctx, struct foo **_foo) { int ret; struct foo *foo = NULL; TALLOC_CTX *tmp_ctx = CREATE_TMP_CTX(mem_ctx); /* ... */ } errno_t handle_request(TALLOC_CTX mem_ctx) { int ret; struct foo *foo = NULL; TALLOC_CTX *pool_ctx = CREATE_POOL_CTX(NULL, 1024); ret = struct_foo_init(mem_ctx, &foo); /* ... */ } @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_context.dox0000660000000000000000000001577700000000000021770 0ustar00rootroot00000000000000/** @page libtalloc_context Chapter 1: Talloc context @section context Talloc context The talloc context is the most important part of this library and is responsible for every single feature of this memory allocator. It is a logical unit which represents a memory space managed by talloc. From the programmer's point of view, the talloc context is completely equivalent to a pointer that would be returned by the memory routines from the C standard library. This means that every context that is returned from the talloc library can be used directly in functions that do not use talloc internally. For example we can do the following: @code char *str1 = strdup("I am NOT a talloc context"); char *str2 = talloc_strdup(NULL, "I AM a talloc context"); printf("%d\n", strcmp(str1, str2) == 0); free(str1); talloc_free(str2); /* we can not use free() on str2 */ @endcode This is possible because the context is internally handled as a special fixed-length structure called talloc chunk. Each chunk stores context metadata followed by the memory space requested by the programmer. When a talloc function returns a context (pointer), it will in fact return a pointer to the user space portion of the talloc chunk. If we to manipulate this context using talloc functions, the talloc library transforms the user-space pointer back to the starting address of the chunk. This is also the reason why we were unable to use free(str2) in the previous example - because str2 does not point at the beginning of the allocated block of memory. 
This is illustrated on the next image: @image html context.png The type TALLOC_CTX is defined in talloc.h to identify a talloc context in function parameters. However, this type is just an alias for void and exists only for semantical reasons - thus we can differentiate between void * (arbitrary data) and TALLOC_CTX * (talloc context). @subsection metadata Context meta data Every talloc context carries several pieces of internal information along with the allocated memory: - name - which is used in reports of context hierarchy and to simulate a dynamic type system, - size of the requested memory in bytes - this can be used to determine the number of elements in arrays, - attached destructor - which is executed just before the memory block is about to be freed, - references to the context - children and parent contexts - create the hierarchical view on the memory. @section context-hierarchy Hierarchy of talloc context Every talloc context contains information about its parent and children. Talloc uses this information to create a hierarchical model of memory or to be more precise, it creates an n-ary tree where each node represents a single talloc context. The root node of the tree is referred to as a top level context - a context without any parent. This approach has several advantages: - as a consequence of freeing a talloc context, all of its children will be properly deallocated as well, - the parent of a context can be changed at any time, which results in moving the whole subtree under another node, - it creates a more natural way of managing data structures. @subsection Example We have a structure that stores basic information about a user - his/her name, identification number and groups he/she is a member of: @code struct user { uid_t uid; char *username; size_t num_groups; char **groups; }; @endcode We will allocate this structure using talloc. The result will be the following context tree: @image html context_tree.png @code /* create new top level context */ struct user *user = talloc(NULL, struct user); user->uid = 1000; user->num_groups = N; /* make user the parent of following contexts */ user->username = talloc_strdup(user, "Test user"); user->groups = talloc_array(user, char*, user->num_groups); for (i = 0; i < user->num_groups; i++) { /* make user->groups the parent of following context */ user->groups[i] = talloc_asprintf(user->groups, "Test group %d", i); } @endcode This way, we have gained a lot of additional capabilities, one of which is very simple deallocation of the structure and all of its elements. With the C standard library we need first to iterate over the array of groups and free every element separately. Then we must deallocate the array that stores them. Next we deallocate the username and as the last step free the structure itself. But with talloc, the only operation we need to execute is freeing the structure context. Its descendants will be freed automatically. @code talloc_free(user); @endcode @section keep-hierarchy Always keep the hieararchy steady! The talloc is a hierarchy memory allocator. The hierarchy nature is what makes the programming more error proof. It makes the memory easier to manage and to free. Therefore, the first thing we should have on our mind is: always project our data structures into the talloc context hierarchy. That means if we have a structure, we should always use it as a parent context for its elements. This way we will not encounter any troubles when freeing this structure or when changing its parent. 
The same rule applies for arrays. @section creating-context Creating a talloc context Here are the most important functions that create a new talloc context. @subsection type-safe Type-safe functions It allocates the size that is necessary for the given type and returns a new, properly-casted pointer. This is the preferred way to create a new context as we can rely on the compiler to detect type mismatches. The name of the context is automatically set to the name of the data type which is used to simulate a dynamic type system. @code struct user *user = talloc(ctx, struct user); /* initialize to default values */ user->uid = 0; user->name = NULL; user->num_groups = 0; user->groups = NULL; /* or we can achieve the same result with */ struct user *user_zero = talloc_zero(ctx, struct user); @endcode @subsection zero-length Zero-length contexts The zero-length context is basically a context without any special semantical meaning. We can use it the same way as any other context. The only difference is that it consists only of the meta data about the context. Therefore, it is strictly of type TALLOC_CTX*. It is often used in cases where we want to aggregate several data structures under one parent (zero-length) context, such as a temporary context to contain memory needed within a single function that is not interesting to the caller. Allocating on a zero-length temporary context will make clean-up of the function simpler. @code TALLOC_CTX *tmp_ctx = NULL; struct foo *foo = NULL; struct bar *bar = NULL; /* new zero-length top level context */ tmp_ctx = talloc_new(NULL); if (tmp_ctx == NULL) { return ENOMEM; } foo = talloc(tmp_ctx, struct foo); bar = talloc(tmp_ctx, struct bar); /* free everything at once */ talloc_free(tmp_ctx); @endcode @subsection context-see-also See also - talloc_size() - talloc_named() - @ref talloc_array - @ref talloc_string */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_debugging.dox0000660000000000000000000000765000000000000022226 0ustar00rootroot00000000000000/** @page libtalloc_debugging Chapter 6: Debugging Although talloc makes memory management significantly easier than the C standard library, developers are still only humans and can make mistakes. Therefore, it can be handy to know some tools for the inspection of talloc memory usage. @section log-abort Talloc log and abort We have already encountered the abort function in section @ref dts. In that case it was used when a type mismatch was detected. However, talloc calls this abort function in several more situations: - when the provided pointer is not a valid talloc context, - when the meta data is invalid - probably due to memory corruption, - and when an access after free is detected. The third one is probably the most interesting. It can help us with detecting an attempt to double-free a context or any other manipulation with it via talloc functions (using it as a parent, stealing it, etc.). Before the context is freed talloc sets a flag in the meta data. This is then used to detect the access after free. It basically works on the assumption that the memory stays unchanged (at least for a while) even when it is properly deallocated. This will work even if the memory is filled with the value specified in TALLOC_FREE_FILL environment variable, because it fills only the data part and leaves the meta data intact. 
Apart from the abort function, talloc uses a log function to provide additional information to the aforementioned violations. To enable logging we shall set the log function with one of: - talloc_set_log_fn() - talloc_set_log_stderr() The following code is a sample output of accessing a context after it has been freed: @code talloc_set_log_stderr(); TALLOC_CTX *ctx = talloc_new(NULL); talloc_free(ctx); talloc_free(ctx); results in: talloc: access after free error - first free may be at ../src/main.c:55 Bad talloc magic value - access after free @endcode Another example is an invalid context: @code talloc_set_log_stderr(); TALLOC_CTX *ctx = talloc_new(NULL); char *str = strdup("not a talloc context"); talloc_steal(ctx, str); results in: Bad talloc magic value - unknown value @endcode @section reports Memory usage reports Talloc can print reports of memory usage of a specified talloc context to a file (to stdout or stderr). The report can be simple or full. The simple report provides information only about the context itself and its direct descendants. The full report goes recursively through the entire context tree. See: - talloc_report() - talloc_report_full() We will use the following code to retrieve the sample report: @code struct foo { char *str; }; TALLOC_CTX *ctx = talloc_new(NULL); char *str = talloc_strdup(ctx, "my string"); struct foo *foo = talloc_zero(ctx, struct foo); foo->str = talloc_strdup(foo, "I am Foo"); char *str2 = talloc_strdup(foo, "Foo is my parent"); /* print full report */ talloc_report_full(ctx, stdout); @endcode It will print a full report of ctx to the standard output. The message should be similar to: @code full talloc report on 'talloc_new: ../src/main.c:82' (total 46 bytes in 5 blocks) struct foo contains 34 bytes in 3 blocks (ref 0) 0x1495130 Foo is my parent contains 17 bytes in 1 blocks (ref 0) 0x1495200 I am Foo contains 9 bytes in 1 blocks (ref 0) 0x1495190 my string contains 10 bytes in 1 blocks (ref 0) 0x14950c0 @endcode We can notice in this report that something is wrong with the context containing struct foo. We know that the structure has only one string element. However, we can see in the report that it has two children. This indicates that we have either violated the memory hierarchy or forgotten to free it as temporary data. Looking into the code, we can see that "Foo is my parent" should be attached to ctx. See also: - talloc_enable_null_tracking() - talloc_disable_null_tracking() - talloc_enable_leak_report() - talloc_enable_leak_report_full() */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_destructors.dox0000660000000000000000000000464100000000000022651 0ustar00rootroot00000000000000/** @page libtalloc_destructors Chapter 4: Using destructors @section destructors Using destructors Destructors are well known methods in the world of object oriented programming. A destructor is a method of an object that is automatically run when the object is destroyed. It is usually used to return resources taken by the object back to the system (e.g. closing file descriptors, terminating connection to a database, deallocating memory). With talloc we can take the advantage of destructors even in C. We can easily attach our own destructor to a talloc context. When the context is freed, the destructor will run automatically. To attach/detach a destructor to a talloc context use: talloc_set_destructor(). 
@section destructors-example Example Imagine that we have a dynamically created linked list. Before we deallocate an element of the list, we need to make sure that we have successfully removed it from the list. Normally, this would be done by two commands in this exact order: remove it from the list and then free the element. With talloc, we can do this in one step by setting a destructor on the element that removes it from the list; talloc_free() will then do the rest. The destructor would be: @code int list_remove(void *ctx) { struct list_el *el = NULL; el = talloc_get_type_abort(ctx, struct list_el); /* remove element from the list */ return 0; } @endcode GCC version 3 and newer can check the types at compile time, so if it is our primary compiler, we can use a type-safe destructor instead: @code int list_remove(struct list_el *el) { /* remove element from the list */ return 0; } @endcode Now we will assign the destructor to the list element. We can do this directly in the function that inserts it. @code struct list_el* list_insert(TALLOC_CTX *mem_ctx, struct list_el *where, void *ptr) { struct list_el *el = talloc(mem_ctx, struct list_el); el->data = ptr; /* insert into list */ talloc_set_destructor(el, list_remove); return el; } @endcode Because talloc is a hierarchical memory allocator, we can go a step further and free the data with the element as well: @code struct list_el* list_insert_free(TALLOC_CTX *mem_ctx, struct list_el *where, void *ptr) { struct list_el *el = NULL; el = list_insert(mem_ctx, where, ptr); talloc_steal(el, ptr); return el; } @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_dts.dox0000660000000000000000000000632100000000000021057 0ustar00rootroot00000000000000/** @page libtalloc_dts Chapter 3: Dynamic type system @section dts Dynamic type system Generic programming in the C language is very difficult. There is neither inheritance nor the templates known from object-oriented languages. There is no dynamic type system. Therefore, generic programming in this language is usually done by type-casting a variable to void* and transferring it through a generic function to a specialized callback, as illustrated in the next listing. @code void generic_function(callback_fn cb, void *pvt) { /* do some stuff and call the callback */ cb(pvt); } void specific_callback(void *pvt) { struct specific_struct *data; data = (struct specific_struct*)pvt; /* ... */ } void specific_function() { struct specific_struct data; generic_function(specific_callback, &data); } @endcode Unfortunately, the type information is lost as a result of this type cast. The compiler cannot check the type at compile time, nor are we able to do so at runtime. Providing an invalid data type to the callback will result in unexpected behaviour (not necessarily a crash) of the application. This mistake is usually hard to detect because it is not the first thing that comes to mind. As we already know, every talloc context contains a name. This name is available at any time and it can be used to determine the type of a context even if we lose the type of a variable. Although the name of the context can be set to any arbitrary string, the best way of using it to simulate the dynamic type system is to set it directly to the type of the variable. It is recommended to use one of talloc() and talloc_array() (or their variants) to create the context, as they set the context name to the name of the given type automatically.
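As a quick illustration (struct foo and mem_ctx are just assumed examples), the stored name can be read back with talloc_get_name() and really is the type name:
@code
struct foo *foo = talloc(mem_ctx, struct foo);

printf("%s\n", talloc_get_name(foo));   /* prints: struct foo */
@endcode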
If we have a context with such a name, we can use two similar functions that do both the type check and the type cast for us: - talloc_get_type() - talloc_get_type_abort() @section dts-examples Examples The following example shows how generic programming with talloc is handled - if we provide invalid data to the callback, the program will be aborted. This is a sufficient reaction to such an error in most applications. @code void foo_callback(void *pvt) { struct foo *data = talloc_get_type_abort(pvt, struct foo); /* ... */ } int do_foo() { struct foo *data = talloc_zero(NULL, struct foo); /* ... */ return generic_function(foo_callback, data); } @endcode But what if we are creating a service application that should run for the entire uptime of a server? In that case, we may want to abort the application during the development process (to make sure the error is not overlooked) but try to recover from the error in the customer release. This can be achieved by creating a custom abort function with a conditional build. @code void my_abort(const char *reason) { fprintf(stderr, "talloc abort: %s\n", reason); #ifdef ABORT_ON_TYPE_MISMATCH abort(); #endif } @endcode The usage of talloc_get_type_abort() would then be: @code talloc_set_abort_fn(my_abort); TALLOC_CTX *ctx = talloc_new(NULL); char *str = talloc_get_type_abort(ctx, char); if (str == NULL) { /* recovery code */ } /* talloc abort: ../src/main.c:25: Type mismatch: name[talloc_new: ../src/main.c:24] expected[char] */ @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_introduction.dox0000660000000000000000000000267600000000000023017 0ustar00rootroot00000000000000/** @page libtalloc_tutorial The Tutorial @section introduction Introduction Talloc is a hierarchical, reference-counted memory pool system with destructors. It is built atop the C standard library and it defines a set of utility functions that together simplify the allocation and deallocation of data, especially for complex structures that contain many dynamically allocated elements such as strings and arrays. The main goals of this library are: removing the need to create a cleanup function for every complex structure, providing a logical organization of allocated memory blocks and reducing the likelihood of creating memory leaks in long-running applications. All of this is achieved by allocating memory in a hierarchical structure of talloc contexts such that deallocating one context recursively frees all of its descendants as well.
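For instance, in the following small sketch (the strings and the array are only illustrative), a single talloc_free() call releases the context together with everything that was allocated on it:
@code
TALLOC_CTX *mem_ctx = talloc_new(NULL);

char *name = talloc_strdup(mem_ctx, "Alice");
char **aliases = talloc_array(mem_ctx, char *, 2);
aliases[0] = talloc_strdup(aliases, "al");
aliases[1] = talloc_strdup(aliases, "ally");

/* releases mem_ctx, name, aliases and both of its strings */
talloc_free(mem_ctx);
@endcode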
@section main-features Main features - An open source project - A hierarchical memory model - Natural projection of data structures into the memory space - Simplifies memory management of large data structures - Automatic execution of a destructor before the memory is freed - Simulates a dynamic type system - Implements a transparent memory pool @section toc Table of contents: @subpage libtalloc_context @subpage libtalloc_stealing @subpage libtalloc_dts @subpage libtalloc_destructors @subpage libtalloc_pools @subpage libtalloc_debugging @subpage libtalloc_bestpractices @subpage libtalloc_threads */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_pools.dox0000660000000000000000000000711400000000000021422 0ustar00rootroot00000000000000/** @page libtalloc_pools Chapter 5: Memory pools @section pools Memory pools Allocating new memory is an expensive operation, and large programs can contain thousands of malloc() calls for a single computation, where every call allocates only a very small amount of memory. This can result in an undesirable slowdown of the application. We can avoid this slowdown by decreasing the number of malloc() calls through the use of a memory pool. A memory pool is a preallocated memory space with a fixed size. If we need to allocate new data, we take the desired amount of memory from the pool instead of requesting new memory from the system. This is done by creating a pointer that points inside the preallocated memory. Such a pool must not be reallocated, as that would change its location - pointers that were pointing inside the pool would become invalid. Therefore, a memory pool requires a very good estimate of the required memory space. The talloc library contains its own implementation of a memory pool. It is highly transparent to the programmer. The only thing that needs to be done is the initialization of a new pool context using talloc_pool() - which can then be used in the same way as any other context. Refactoring existing code (that uses talloc) to take advantage of a memory pool is quite simple due to the following properties of the pool context: - if we are allocating data on a pool context, it takes the desired amount of memory from the pool, - if the context is a descendant of the pool context, it takes the space from the pool as well, - if the pool does not have a sufficient amount of memory left, it will create a new talloc chunk outside the pool, leaving the pool intact @code /* allocate 1KiB in a pool */ TALLOC_CTX *pool_ctx = talloc_pool(NULL, 1024); /* Take 512B from the pool, 512B is left there */ void *ptr = talloc_size(pool_ctx, 512); /* 1024B > 512B, this will create a new talloc chunk outside the pool */ void *ptr2 = talloc_size(ptr, 1024); /* The pool still contains 512 free bytes * this will take 200B from them. */ void *ptr3 = talloc_size(ptr, 200); /* This will destroy context 'ptr3' but the memory * is not freed, the available space in the pool * will increase to 512B. */ talloc_free(ptr3); /* This will free memory taken by 'pool_ctx' * and 'ptr2' as well. */ talloc_free(pool_ctx); @endcode This is all very convenient, but there is one big issue to keep in mind. If the parent of a talloc pool child is changed to a parent that is outside of this pool, the whole pool memory will not be freed until the child is freed. For this reason we must be very careful when stealing a descendant of a pool context.
@code TALLOC_CTX *mem_ctx = talloc_new(NULL); TALLOC_CTX *pool_ctx = talloc_pool(NULL, 1024); struct foo *foo = talloc(pool_ctx, struct foo); /* mem_ctx is not in the pool */ talloc_steal(mem_ctx, foo); /* pool_ctx is marked as freed but the memory is not deallocated, accessing the pool_ctx again will cause an error */ talloc_free(pool_ctx); /* This deallocates the pool_ctx. */ talloc_free(mem_ctx); @endcode To avoid this problem, it is often better to copy the memory we want instead of stealing it. If we do not need to retain the context name (to keep the type information), we can use talloc_memdup() to do this. Copying the memory out of the pool may, however, discard all of the performance boost given by the pool, depending on the size of the copied memory. Therefore, the code should be well profiled before taking this path. In general, the golden rule is: if we need to steal from a pool context, we should not use a pool context. */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_stealing.dox0000660000000000000000000000362400000000000022076 0ustar00rootroot00000000000000/** @page libtalloc_stealing Chapter 2: Stealing a context @section stealing Stealing a context Talloc has the ability to change the parent of a talloc context to another one. This operation is commonly referred to as stealing, and it is one of the most important actions performed with talloc contexts. Stealing a context is necessary if we want the pointer to outlive the context it is created on. This has many possible use cases, for instance stealing a result of a database search to an in-memory cache context, or changing the parent of a field of a generic structure to a more specific one or vice versa. The most common scenario, at least in Samba, is to steal output data from a function-specific context to the output context given as an argument of that function. @code struct foo { char *a1; char *a2; char *a3; }; struct bar { char *wurst; struct foo *foo; }; struct foo *foo = talloc_zero(ctx, struct foo); foo->a1 = talloc_strdup(foo, "a1"); foo->a2 = talloc_strdup(foo, "a2"); foo->a3 = talloc_strdup(foo, "a3"); struct bar *bar = talloc_zero(NULL, struct bar); /* change parent of foo from ctx to bar */ bar->foo = talloc_steal(bar, foo); /* or do the same but assign foo = NULL */ bar->foo = talloc_move(bar, &foo); @endcode The talloc_move() function is similar to the talloc_steal() function but additionally sets the source pointer to NULL. With talloc_steal(), the source pointer itself is not changed (only the parent in the meta data is replaced); however, the common usage is that the result is assigned to another variable, so further access to the pointer through the original variable should be avoided unless it is really necessary. In such cases talloc_move() is the preferred way of stealing a context: by setting the source pointer to NULL, it protects the pointer from being accidentally freed and accessed through the old variable after its parent has been changed.
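The function-output scenario mentioned above then typically looks like the following sketch; build_foo(), its _foo output parameter and the ENOMEM error handling are illustrative assumptions rather than a fixed talloc API:
@code
int build_foo(TALLOC_CTX *mem_ctx, struct foo **_foo)
{
    TALLOC_CTX *tmp_ctx = talloc_new(NULL);
    struct foo *foo = NULL;

    if (tmp_ctx == NULL) {
        return ENOMEM;
    }

    foo = talloc_zero(tmp_ctx, struct foo);
    if (foo == NULL) {
        talloc_free(tmp_ctx);
        return ENOMEM;
    }

    foo->a1 = talloc_strdup(foo, "a1");
    if (foo->a1 == NULL) {
        talloc_free(tmp_ctx);
        return ENOMEM;
    }

    /* hand the result over to the caller's context */
    *_foo = talloc_steal(mem_ctx, foo);

    /* everything still attached to tmp_ctx is released */
    talloc_free(tmp_ctx);
    return 0;
}
@endcode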
@image html stealing.png */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doc/tutorial_threads.dox0000660000000000000000000001123000000000000021712 0ustar00rootroot00000000000000/** @page libtalloc_threads Chapter 8: Using threads with talloc @section thread-safety Talloc and thread safety The talloc library is not internally thread-safe, in that accesses to variables on a talloc context are not controlled by mutexes or other thread-safe primitives. However, so long as talloc_disable_null_tracking() is called from the main thread to disable global variable access within talloc, then each thread can safely use its own top level talloc context allocated off the NULL context. For example: @code static void *thread_fn(void *arg) { const char *ctx_name = (const char *)arg; void *sub_ctx = NULL; /* * Create a new top level talloc hierarchy in * this thread. */ void *top_ctx = talloc_named_const(NULL, 0, "top"); if (top_ctx == NULL) { return NULL; } sub_ctx = talloc_named_const(top_ctx, 100, ctx_name); if (sub_ctx == NULL) { return NULL; } /* * Do more processing/talloc calls on top_ctx * and its children. */ ...... talloc_free(top_ctx); return NULL; } @endcode is a perfectly safe use of talloc within a thread. The problem comes when one thread wishes to move some memory allocated on its local top level talloc context to another thread. Care must be taken to add data access exclusion to prevent memory corruption. One method would be to lock a mutex before any talloc call on each thread, but this would push the burden of total talloc thread-safety onto the poor user of the library. A much easier way to transfer talloc'ed memory between threads is by the use of an intermediate, mutex-locked variable. An example of this is below - taken from test code inside the talloc testsuite. The main thread creates 1000 sub-threads, and then accepts the transfer of some thread-talloc'ed memory onto its top level context from each thread in turn. A pthread mutex and condition variable are used to synchronize the transfer via the intermediate_ptr variable. @code /* Required sync variables. */ static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER; /* Intermediate talloc pointer for transfer. */ static void *intermediate_ptr; /* Subthread. */ static void *thread_fn(void *arg) { int ret; const char *ctx_name = (const char *)arg; void *sub_ctx = NULL; /* * Do stuff that creates a new talloc hierarchy in * this thread. */ void *top_ctx = talloc_named_const(NULL, 0, "top"); if (top_ctx == NULL) { return NULL; } sub_ctx = talloc_named_const(top_ctx, 100, ctx_name); if (sub_ctx == NULL) { return NULL; } /* * Now transfer a pointer from our hierarchy * onto the intermediate ptr. */ ret = pthread_mutex_lock(&mtx); if (ret != 0) { talloc_free(top_ctx); return NULL; } /* Wait for intermediate_ptr to be free. */ while (intermediate_ptr != NULL) { ret = pthread_cond_wait(&condvar, &mtx); if (ret != 0) { talloc_free(top_ctx); return NULL; } } /* and move our memory onto it from our toplevel hierarchy. */ intermediate_ptr = talloc_move(NULL, &sub_ctx); /* Tell the main thread it's ready for pickup. */ pthread_cond_broadcast(&condvar); pthread_mutex_unlock(&mtx); talloc_free(top_ctx); return NULL; } /* Main thread. */ #define NUM_THREADS 1000 static bool test_pthread_talloc_passing(void) { int i; int ret; char str_array[NUM_THREADS][20]; pthread_t thread_id; void *mem_ctx; /* * Important !
Null tracking breaks threaded talloc. * It *must* be turned off. */ talloc_disable_null_tracking(); /* Main thread toplevel context. */ mem_ctx = talloc_named_const(NULL, 0, "toplevel"); if (mem_ctx == NULL) { return false; } /* * Spin off NUM_THREADS threads. * They will use their own toplevel contexts. */ for (i = 0; i < NUM_THREADS; i++) { (void)snprintf(str_array[i], 20, "thread:%d", i); if (str_array[i] == NULL) { return false; } ret = pthread_create(&thread_id, NULL, thread_fn, str_array[i]); if (ret != 0) { return false; } } /* Now wait for NUM_THREADS transfers of the talloc'ed memory. */ for (i = 0; i < NUM_THREADS; i++) { ret = pthread_mutex_lock(&mtx); if (ret != 0) { talloc_free(mem_ctx); return false; } /* Wait for intermediate_ptr to have our data. */ while (intermediate_ptr == NULL) { ret = pthread_cond_wait(&condvar, &mtx); if (ret != 0) { talloc_free(mem_ctx); return false; } } /* and move it onto our toplevel hierarchy. */ (void)talloc_move(mem_ctx, &intermediate_ptr); /* Tell the sub-threads we're ready for another. */ pthread_cond_broadcast(&condvar); pthread_mutex_unlock(&mtx); } /* Dump the hierarchy. */ talloc_report(mem_ctx, stdout); talloc_free(mem_ctx); return true; } @endcode */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/doxy.config0000660000000000000000000022571600000000000017246 0ustar00rootroot00000000000000# Doxyfile 1.8.0 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = talloc # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 2.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. 
# If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = doc # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. 
Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. 
OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. 
DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. 
LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = YES # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = YES # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. 
Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. 
GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. The create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. 
QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . \ doc # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.cpp \ *.cc \ *.c \ *.h \ *.hh \ *.hpp \ *.dox # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. 
EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */.git/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = doc # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. 
FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. 
Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = NO # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. 
CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). 
For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NONE # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. 
SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. 
# This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. 
If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = DOXYGEN \ PRINTF_ATTRIBUTE(x,y)= # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. 
EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. 
INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/man/talloc.3.xml0000660000000000000000000010702000000000000017773 0ustar00rootroot00000000000000 2015-04-10 talloc 3 Samba System Administration tools 4.0 talloc hierarchical reference counted memory pool system with destructors #include <talloc.h> DESCRIPTION If you are used to talloc from Samba3 then please read this carefully, as talloc has changed a lot. The new talloc is a hierarchical, reference counted memory pool system with destructors. Quite a mouthful really, but not too bad once you get used to it. Perhaps the biggest change from Samba3 is that there is no distinction between a "talloc context" and a "talloc pointer". Any pointer returned from talloc() is itself a valid talloc context. 
This means you can do this: struct foo *X = talloc(mem_ctx, struct foo); X->name = talloc_strdup(X, "foo"); and the pointer X->name would be a "child" of the talloc context X which is itself a child of mem_ctx. So if you do talloc_free(mem_ctx) then it is all destroyed, whereas if you do talloc_free(X) then just X and X->name are destroyed, and if you do talloc_free(X->name) then just the name element of X is destroyed. If you think about this, then what this effectively gives you is an n-ary tree, where you can free any part of the tree with talloc_free(). If you find this confusing, then I suggest you run the testsuite program to watch talloc in action. You may also like to add your own tests to testsuite.c to clarify how some particular situation is handled. TALLOC API The following is a complete guide to the talloc API. Read it all at least twice. (type *)talloc(const void *ctx, type); The talloc() macro is the core of the talloc library. It takes a memory ctx and a type, and returns a pointer to a new area of memory of the given type. The returned pointer is itself a talloc context, so you can use it as the ctx argument to more calls to talloc() if you wish. The returned pointer is a "child" of the supplied context. This means that if you talloc_free() the ctx then the new child disappears as well. Alternatively you can free just the child. The ctx argument to talloc() can be NULL, in which case a new top level context is created. void *talloc_size(const void *ctx, size_t size); The function talloc_size() should be used when you don't have a convenient type to pass to talloc(). Unlike talloc(), it is not type safe (as it returns a void *), so you are on your own for type checking. (typeof(ptr)) talloc_ptrtype(const void *ctx, ptr); The talloc_ptrtype() macro should be used when you have a pointer and want to allocate memory to point at with this pointer. When compiling with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size() and talloc_get_name() will return the current location in the source file. and not the type. int talloc_free(void *ptr); The talloc_free() function frees a piece of talloc memory, and all its children. You can call talloc_free() on any pointer returned by talloc(). The return value of talloc_free() indicates success or failure, with 0 returned for success and -1 for failure. The only possible failure condition is if ptr had a destructor attached to it and the destructor returned -1. See talloc_set_destructor() for details on destructors. If this pointer has an additional parent when talloc_free() is called then the memory is not actually released, but instead the most recently established parent is destroyed. See talloc_reference() for details on establishing additional parents. For more control on which parent is removed, see talloc_unlink(). talloc_free() operates recursively on its children. From the 2.0 version of talloc, as a special case, talloc_free() is refused on pointers that have more than one parent, as talloc would have no way of knowing which parent should be removed. To free a pointer that has more than one parent please use talloc_unlink(). 
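As an illustration of this rule, here is a minimal hypothetical sketch (parent_a, parent_b and s are invented names for this example; error checking of the allocations is omitted):

    #include <talloc.h>

    static void two_parent_example(void)
    {
            TALLOC_CTX *parent_a = talloc_new(NULL);
            TALLOC_CTX *parent_b = talloc_new(NULL);
            char *s = talloc_strdup(parent_a, "shared");

            /* give s a second parent */
            talloc_reference(parent_b, s);

            /* refused: s now has two parents, so this returns -1 */
            if (talloc_free(s) == -1) {
                    /* remove one specific parent instead */
                    talloc_unlink(parent_b, s);
            }

            /* back to a single parent; freeing parent_a frees s too */
            talloc_free(parent_a);
            talloc_free(parent_b);
    }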
To help you find problems in your code caused by this behaviour, if you do try and free a pointer with more than one parent then the talloc logging function will be called to give output like this: ERROR: talloc_free with references at some_dir/source/foo.c:123 reference at some_dir/source/other.c:325 reference at some_dir/source/third.c:121 Please see the documentation for talloc_set_log_fn() and talloc_set_log_stderr() for more information on talloc logging functions. void *talloc_reference(const void *ctx, const void *ptr); The talloc_reference() function makes ctx an additional parent of ptr. The return value of talloc_reference() is always the original pointer ptr, unless talloc ran out of memory in creating the reference in which case it will return NULL (each additional reference consumes around 48 bytes of memory on intel x86 platforms). If ptr is NULL, then the function is a no-op, and simply returns NULL. After creating a reference you can free it in one of the following ways: you can talloc_free() any parent of the original pointer. That will reduce the number of parents of this pointer by 1, and will cause this pointer to be freed if it runs out of parents. you can talloc_free() the pointer itself if it has at maximum one parent. This behaviour has been changed since the release of version 2.0. Further information in the description of "talloc_free". For more control on which parent to remove, see talloc_unlink(). int talloc_unlink(const void *ctx, void *ptr); The talloc_unlink() function removes a specific parent from ptr. The ctx passed must either be a context used in talloc_reference() with this pointer, or must be a direct parent of ptr. Note that if the parent has already been removed using talloc_free() then this function will fail and will return -1. Likewise, if ptr is NULL, then the function will make no modifications and return -1. Usually you can just use talloc_free() instead of talloc_unlink(), but sometimes it is useful to have the additional control on which parent is removed. void talloc_set_destructor(const void *ptr, int (*destructor)(void *)); The function talloc_set_destructor() sets the destructor for the pointer ptr. A destructor is a function that is called when the memory used by a pointer is about to be released. The destructor receives ptr as an argument, and should return 0 for success and -1 for failure. The destructor can do anything it wants to, including freeing other pieces of memory. A common use for destructors is to clean up operating system resources (such as open file descriptors) contained in the structure the destructor is placed on. You can only place one destructor on a pointer. If you need more than one destructor then you can create a zero-length child of the pointer and place an additional destructor on that. To remove a destructor call talloc_set_destructor() with NULL for the destructor. If your destructor attempts to talloc_free() the pointer that it is the destructor for then talloc_free() will return -1 and the free will be ignored. This would be a pointless operation anyway, as the destructor is only called when the memory is just about to go away. int talloc_increase_ref_count(const void *<emphasis role="italic">ptr</emphasis>); The talloc_increase_ref_count(ptr) function is exactly equivalent to: talloc_reference(NULL, ptr); You can use either syntax, depending on which you think is clearer in your code. It returns 0 on success and -1 on failure. 
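As an example of the open file descriptor case mentioned above, the following sketch (struct open_file, open_file_new and the destructor are invented names, not part of the talloc API) closes the descriptor automatically when the owning structure is freed:

    #include <talloc.h>
    #include <fcntl.h>
    #include <unistd.h>

    struct open_file {
            int fd;
    };

    /* runs just before the memory for *f is released */
    static int open_file_destructor(struct open_file *f)
    {
            if (f->fd != -1) {
                    close(f->fd);
                    f->fd = -1;
            }
            return 0;       /* 0 allows the free, -1 would refuse it */
    }

    static struct open_file *open_file_new(TALLOC_CTX *ctx, const char *path)
    {
            struct open_file *f = talloc(ctx, struct open_file);
            if (f == NULL) {
                    return NULL;
            }
            f->fd = open(path, O_RDONLY);
            if (f->fd == -1) {
                    talloc_free(f);
                    return NULL;
            }
            talloc_set_destructor(f, open_file_destructor);
            return f;       /* talloc_free(f) will now also close f->fd */
    }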
size_t talloc_reference_count(const void *<emphasis role="italic">ptr</emphasis>); Return the number of references to the pointer. void talloc_set_name(const void *ptr, const char *fmt, ...); Each talloc pointer has a "name". The name is used principally for debugging purposes, although it is also possible to set and get the name on a pointer in as a way of "marking" pointers in your code. The main use for names on pointer is for "talloc reports". See talloc_report_depth_cb(), talloc_report_depth_file(), talloc_report() talloc_report() and talloc_report_full() for details. Also see talloc_enable_leak_report() and talloc_enable_leak_report_full(). The talloc_set_name() function allocates memory as a child of the pointer. It is logically equivalent to: talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...)); Note that multiple calls to talloc_set_name() will allocate more memory without releasing the name. All of the memory is released when the ptr is freed using talloc_free(). void talloc_set_name_const(const void *<emphasis role="italic">ptr</emphasis>, const char *<emphasis role="italic">name</emphasis>); The function talloc_set_name_const() is just like talloc_set_name(), but it takes a string constant, and is much faster. It is extensively used by the "auto naming" macros, such as talloc_p(). This function does not allocate any memory. It just copies the supplied pointer into the internal representation of the talloc ptr. This means you must not pass a name pointer to memory that will disappear before ptr is freed with talloc_free(). void *talloc_named(const void *<emphasis role="italic">ctx</emphasis>, size_t <emphasis role="italic">size</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, ...); The talloc_named() function creates a named talloc pointer. It is equivalent to: ptr = talloc_size(ctx, size); talloc_set_name(ptr, fmt, ....); void *talloc_named_const(const void *<emphasis role="italic">ctx</emphasis>, size_t <emphasis role="italic">size</emphasis>, const char *<emphasis role="italic">name</emphasis>); This is equivalent to: ptr = talloc_size(ctx, size); talloc_set_name_const(ptr, name); const char *talloc_get_name(const void *<emphasis role="italic">ptr</emphasis>); This returns the current name for the given talloc pointer, ptr. See talloc_set_name() for details. void *talloc_init(const char *<emphasis role="italic">fmt</emphasis>, ...); This function creates a zero length named talloc context as a top level context. It is equivalent to: talloc_named(NULL, 0, fmt, ...); void *talloc_new(void *<emphasis role="italic">ctx</emphasis>); This is a utility macro that creates a new memory context hanging off an existing context, automatically naming it "talloc_new: __location__" where __location__ is the source line it is called from. It is particularly useful for creating a new temporary working context. (<emphasis role="italic">type</emphasis> *)talloc_realloc(const void *<emphasis role="italic">ctx</emphasis>, void *<emphasis role="italic">ptr</emphasis>, <emphasis role="italic">type</emphasis>, <emphasis role="italic">count</emphasis>); The talloc_realloc() macro changes the size of a talloc pointer. It has the following equivalences: talloc_realloc(ctx, NULL, type, 1) ==> talloc(ctx, type); talloc_realloc(ctx, ptr, type, 0) ==> talloc_free(ptr); The ctx argument is only used if ptr is not NULL, otherwise it is ignored. talloc_realloc() returns the new pointer, or NULL on failure. 
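A rough sketch of the usual calling pattern (grow_array, numbers and count are invented names): assign the result to a temporary so the old array is not lost if the call fails:

    #include <talloc.h>

    static int grow_array(TALLOC_CTX *ctx, int **numbers, size_t *count)
    {
            int *tmp = talloc_realloc(ctx, *numbers, int, *count + 16);
            if (tmp == NULL) {
                    return -1;      /* *numbers is still valid and unchanged */
            }
            *numbers = tmp;
            *count += 16;
            return 0;
    }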
The call will fail either due to a lack of memory, or because the pointer has more than one parent (see talloc_reference()). void *talloc_realloc_size(const void *ctx, void *ptr, size_t size); the talloc_realloc_size() function is useful when the type is not known so the type-safe talloc_realloc() cannot be used. TYPE *talloc_steal(const void *<emphasis role="italic">new_ctx</emphasis>, const TYPE *<emphasis role="italic">ptr</emphasis>); The talloc_steal() function changes the parent context of a talloc pointer. It is typically used when the context that the pointer is currently a child of is going to be freed and you wish to keep the memory for a longer time. The talloc_steal() function returns the pointer that you pass it. It does not have any failure modes. It is possible to produce loops in the parent/child relationship if you are not careful with talloc_steal(). No guarantees are provided as to your sanity or the safety of your data if you do this. Note that if you try and call talloc_steal() on a pointer that has more than one parent then the result is ambiguous. Talloc will choose to remove the parent that is currently indicated by talloc_parent() and replace it with the chosen parent. You will also get a message like this via the talloc logging functions: WARNING: talloc_steal with references at some_dir/source/foo.c:123 reference at some_dir/source/other.c:325 reference at some_dir/source/third.c:121 To unambiguously change the parent of a pointer please see the function talloc_reparent(). See the talloc_set_log_fn() documentation for more information on talloc logging. TYPE *talloc_reparent(const void *<emphasis role="italic">old_parent</emphasis>, const void *<emphasis role="italic">new_parent</emphasis>, const TYPE *<emphasis role="italic">ptr</emphasis>); The talloc_reparent() function changes the parent context of a talloc pointer. It is typically used when the context that the pointer is currently a child of is going to be freed and you wish to keep the memory for a longer time. The talloc_reparent() function returns the pointer that you pass it. It does not have any failure modes. The difference between talloc_reparent() and talloc_steal() is that talloc_reparent() can specify which parent you wish to change. This is useful when a pointer has multiple parents via references. TYPE *talloc_move(const void *<emphasis role="italic">new_ctx</emphasis>, TYPE **<emphasis role="italic">ptr</emphasis>); The talloc_move() function is a wrapper around talloc_steal() which zeros the source pointer after the move. This avoids a potential source of bugs where a programmer leaves a pointer in two structures, and uses the pointer from the old structure after it has been moved to a new one. size_t talloc_total_size(const void *<emphasis role="italic">ptr</emphasis>); The talloc_total_size() function returns the total size in bytes used by this pointer and all child pointers. Mostly useful for debugging. Passing NULL is allowed, but it will only give a meaningful result if talloc_enable_leak_report() or talloc_enable_leak_report_full() has been called. size_t talloc_total_blocks(const void *<emphasis role="italic">ptr</emphasis>); The talloc_total_blocks() function returns the total memory block count used by this pointer and all child pointers. Mostly useful for debugging. Passing NULL is allowed, but it will only give a meaningful result if talloc_enable_leak_report() or talloc_enable_leak_report_full() has been called. 
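A small hypothetical sketch of the pattern these functions support, building a value on a throw-away context and keeping only the finished result (build_name and the string contents are invented for this example):

    #include <talloc.h>

    static char *build_name(TALLOC_CTX *mem_ctx)
    {
            TALLOC_CTX *tmp = talloc_new(mem_ctx);  /* scratch context */
            char *name;
            char *result = NULL;

            if (tmp == NULL) {
                    return NULL;
            }

            name = talloc_asprintf(tmp, "%s-%d", "node", 42);
            if (name != NULL) {
                    /* keep the finished string; talloc_move() also zeroes name */
                    result = talloc_move(mem_ctx, &name);
            }

            talloc_free(tmp);       /* frees the scratch context, not result */
            return result;
    }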
void talloc_report(const void *ptr, FILE *f); The talloc_report() function prints a summary report of all memory used by ptr. One line of report is printed for each immediate child of ptr, showing the total memory and number of blocks used by that child. You can pass NULL for the pointer, in which case a report is printed for the top level memory context, but only if talloc_enable_leak_report() or talloc_enable_leak_report_full() has been called. void talloc_report_full(const void *<emphasis role="italic">ptr</emphasis>, FILE *<emphasis role="italic">f</emphasis>); This provides a more detailed report than talloc_report(). It will recursively print the entire tree of memory referenced by the pointer. References in the tree are shown by giving the name of the pointer that is referenced. You can pass NULL for the pointer, in which case a report is printed for the top level memory context, but only if talloc_enable_leak_report() or talloc_enable_leak_report_full() has been called. void talloc_report_depth_cb const void *ptr int depth int max_depth void (*callback)(const void *ptr, int depth, int max_depth, int is_ref, void *priv) void *priv This provides a more flexible reports than talloc_report(). It will recursively call the callback for the entire tree of memory referenced by the pointer. References in the tree are passed with is_ref = 1 and the pointer that is referenced. You can pass NULL for the pointer, in which case a report is printed for the top level memory context, but only if talloc_enable_leak_report() or talloc_enable_leak_report_full() has been called. The recursion is stopped when depth >= max_depth. max_depth = -1 means only stop at leaf nodes. void talloc_report_depth_file const void *ptr int depth int max_depth FILE *f This provides a more flexible reports than talloc_report(). It will let you specify the depth and max_depth. void talloc_enable_leak_report(void); This enables calling of talloc_report(NULL, stderr) when the program exits. In Samba4 this is enabled by using the --leak-report command line option. For it to be useful, this function must be called before any other talloc function as it establishes a "null context" that acts as the top of the tree. If you don't call this function first then passing NULL to talloc_report() or talloc_report_full() won't give you the full tree printout. Here is a typical talloc report: talloc report on 'null_context' (total 267 bytes in 15 blocks) libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks iconv(UTF8,CP850) contains 42 bytes in 2 blocks libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks iconv(CP850,UTF8) contains 42 bytes in 2 blocks iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks void talloc_enable_leak_report_full(void); This enables calling of talloc_report_full(NULL, stderr) when the program exits. In Samba4 this is enabled by using the --leak-report-full command line option. For it to be useful, this function must be called before any other talloc function as it establishes a "null context" that acts as the top of the tree. If you don't call this function first then passing NULL to talloc_report() or talloc_report_full() won't give you the full tree printout. 
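For instance, a minimal sketch of a program that turns on full leak reporting first thing (the deliberately leaked allocation is only there to produce some output):

    #include <talloc.h>

    int main(void)
    {
            void *ctx;

            /* must be called before any other talloc function */
            talloc_enable_leak_report_full();

            ctx = talloc_named_const(NULL, 0, "example");
            (void)talloc_strdup(ctx, "leaked on purpose");

            /* ctx is never freed, so a report similar to the one below
               is written to stderr when the program exits */
            return 0;
    }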
Here is a typical full report: full talloc report on 'root' (total 18 bytes in 8 blocks) p1 contains 18 bytes in 7 blocks (ref 0) r1 contains 13 bytes in 2 blocks (ref 0) reference to: p2 p2 contains 1 bytes in 1 blocks (ref 1) x3 contains 1 bytes in 1 blocks (ref 0) x2 contains 1 bytes in 1 blocks (ref 0) x1 contains 1 bytes in 1 blocks (ref 0) (<emphasis role="italic">type</emphasis> *)talloc_zero(const void *<emphasis role="italic">ctx</emphasis>, <emphasis role="italic">type</emphasis>); The talloc_zero() macro is equivalent to: ptr = talloc(ctx, type); if (ptr) memset(ptr, 0, sizeof(type)); void *talloc_zero_size(const void *<emphasis role="italic">ctx</emphasis>, size_t <emphasis role="italic">size</emphasis>) The talloc_zero_size() function is useful when you don't have a known type. void *talloc_memdup(const void *<emphasis role="italic">ctx</emphasis>, const void *<emphasis role="italic">p</emphasis>, size_t size); The talloc_memdup() function is equivalent to: ptr = talloc_size(ctx, size); if (ptr) memcpy(ptr, p, size); char *talloc_strdup(const void *<emphasis role="italic">ctx</emphasis>, const char *<emphasis role="italic">p</emphasis>); The talloc_strdup() function is equivalent to: ptr = talloc_size(ctx, strlen(p)+1); if (ptr) memcpy(ptr, p, strlen(p)+1); This function sets the name of the new pointer to the passed string. This is equivalent to: talloc_set_name_const(ptr, ptr) char *talloc_strndup(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">p</emphasis>, size_t <emphasis role="italic">n</emphasis>); The talloc_strndup() function is the talloc equivalent of the C library function strndup(3). This function sets the name of the new pointer to the passed string. This is equivalent to: talloc_set_name_const(ptr, ptr) char *talloc_vasprintf(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, va_list <emphasis role="italic">ap</emphasis>); The talloc_vasprintf() function is the talloc equivalent of the C library function vasprintf(3). This function sets the name of the new pointer to the new string. This is equivalent to: talloc_set_name_const(ptr, ptr) char *talloc_asprintf(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, ...); The talloc_asprintf() function is the talloc equivalent of the C library function asprintf(3). This function sets the name of the new pointer to the passed string. This is equivalent to: talloc_set_name_const(ptr, ptr) char *talloc_asprintf_append(char *s, const char *fmt, ...); The talloc_asprintf_append() function appends the given formatted string to the given string. This function sets the name of the new pointer to the new string. This is equivalent to: talloc_set_name_const(ptr, ptr) (type *)talloc_array(const void *ctx, type, unsigned int count); The talloc_array() macro is equivalent to: (type *)talloc_size(ctx, sizeof(type) * count); except that it provides integer overflow protection for the multiply, returning NULL if the multiply overflows. void *talloc_array_size(const void *ctx, size_t size, unsigned int count); The talloc_array_size() function is useful when the type is not known. It operates in the same way as talloc_array(), but takes a size instead of a type. (typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, unsigned int count); The talloc_ptrtype() macro should be used when you have a pointer to an array and want to allocate memory of an array to point at with this pointer. 
When compiling with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_array_size() and talloc_get_name() will return the current location in the source file. and not the type. void *talloc_realloc_fn(const void *ctx, void *ptr, size_t size) This is a non-macro version of talloc_realloc(), which is useful as libraries sometimes want a realloc function pointer. A realloc(3) implementation encapsulates the functionality of malloc(3), free(3) and realloc(3) in one call, which is why it is useful to be able to pass around a single function pointer. void *talloc_autofree_context(void); This is a handy utility function that returns a talloc context which will be automatically freed on program exit. This can be used to reduce the noise in memory leak reports. void *talloc_check_name(const void *ptr, const char *name); This function checks if a pointer has the specified name. If it does then the pointer is returned. It it doesn't then NULL is returned. (type *)talloc_get_type(const void *ptr, type); This macro allows you to do type checking on talloc pointers. It is particularly useful for void* private pointers. It is equivalent to this: (type *)talloc_check_name(ptr, #type) talloc_set_type(const void *ptr, type); This macro allows you to force the name of a pointer to be a particular type. This can be used in conjunction with talloc_get_type() to do type checking on void* pointers. It is equivalent to this: talloc_set_name_const(ptr, #type) talloc_set_log_fn(void (*log_fn)(const char *message)); This function sets a logging function that talloc will use for warnings and errors. By default talloc will not print any warnings or errors. talloc_set_log_stderr(void); This sets the talloc log function to write log messages to stderr PERFORMANCE All the additional features of talloc(3) over malloc(3) do come at a price. We have a simple performance test in Samba4 that measures talloc() versus malloc() performance, and it seems that talloc() is about 10% slower than malloc() on my x86 Debian Linux box. For Samba, the great reduction in code complexity that we get by using talloc makes this worthwhile, especially as the total overhead of talloc/malloc in Samba is already quite small. SEE ALSO malloc(3), strndup(3), vasprintf(3), asprintf(3), AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. COPYRIGHT/LICENSE Copyright (C) Andrew Tridgell 2004 This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see http://www.gnu.org/licenses/. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/pytalloc-util.pc.in0000660000000000000000000000053100000000000020611 0ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: pytalloc-util@PYTHON_SO_ABI_FLAG@ Description: Utility functions for using talloc objects with Python Version: @TALLOC_VERSION@ Libs: @LIB_RPATH@ -L${libdir} -lpytalloc-util@PYTHON_LIBNAME_SO_ABI_FLAG@ Cflags: -I${includedir} URL: http://talloc.samba.org/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/talloc/pytalloc.c0000660000000000000000000002125600000000000017060 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Python Talloc Module Copyright (C) Jelmer Vernooij 2010-2011 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #include #include #include "pytalloc_private.h" static PyTypeObject TallocObject_Type; /* print a talloc tree report for a talloc python object */ static PyObject *pytalloc_report_full(PyObject *self, PyObject *args) { PyObject *py_obj = Py_None; if (!PyArg_ParseTuple(args, "|O", &py_obj)) return NULL; if (py_obj == Py_None) { talloc_report_full(NULL, stdout); } else { talloc_report_full(pytalloc_get_mem_ctx(py_obj), stdout); } Py_RETURN_NONE; } /* enable null tracking */ static PyObject *pytalloc_enable_null_tracking(PyObject *self, PyObject *Py_UNUSED(ignored)) { talloc_enable_null_tracking(); Py_RETURN_NONE; } /* return the number of talloc blocks */ static PyObject *pytalloc_total_blocks(PyObject *self, PyObject *args) { PyObject *py_obj = Py_None; if (!PyArg_ParseTuple(args, "|O", &py_obj)) return NULL; if (py_obj == Py_None) { return PyLong_FromLong(talloc_total_blocks(NULL)); } return PyLong_FromLong(talloc_total_blocks(pytalloc_get_mem_ctx(py_obj))); } static PyMethodDef talloc_methods[] = { { "report_full", (PyCFunction)pytalloc_report_full, METH_VARARGS, "show a talloc tree for an object"}, { "enable_null_tracking", (PyCFunction)pytalloc_enable_null_tracking, METH_NOARGS, "enable tracking of the NULL object"}, { "total_blocks", (PyCFunction)pytalloc_total_blocks, METH_VARARGS, "return talloc block count"}, {0} }; /** * Default (but only slightly more useful than the default) implementation of Repr(). 
*/ static PyObject *pytalloc_default_repr(PyObject *obj) { pytalloc_Object *talloc_obj = (pytalloc_Object *)obj; PyTypeObject *type = (PyTypeObject*)PyObject_Type(obj); return PyUnicode_FromFormat("<%s talloc object at %p>", type->tp_name, talloc_obj->ptr); } /** * Simple dealloc for talloc-wrapping PyObjects */ static void pytalloc_dealloc(PyObject* self) { pytalloc_Object *obj = (pytalloc_Object *)self; assert(talloc_unlink(NULL, obj->talloc_ctx) != -1); obj->talloc_ctx = NULL; self->ob_type->tp_free(self); } /** * Default (but only slightly more useful than the default) implementation of cmp. */ #if PY_MAJOR_VERSION >= 3 static PyObject *pytalloc_default_richcmp(PyObject *obj1, PyObject *obj2, int op) { void *ptr1; void *ptr2; if (Py_TYPE(obj1) == Py_TYPE(obj2)) { /* When types match, compare pointers */ ptr1 = pytalloc_get_ptr(obj1); ptr2 = pytalloc_get_ptr(obj2); } else if (PyObject_TypeCheck(obj2, &TallocObject_Type)) { /* Otherwise, compare types */ ptr1 = Py_TYPE(obj1); ptr2 = Py_TYPE(obj2); } else { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } switch (op) { case Py_EQ: return PyBool_FromLong(ptr1 == ptr2); case Py_NE: return PyBool_FromLong(ptr1 != ptr2); case Py_LT: return PyBool_FromLong(ptr1 < ptr2); case Py_GT: return PyBool_FromLong(ptr1 > ptr2); case Py_LE: return PyBool_FromLong(ptr1 <= ptr2); case Py_GE: return PyBool_FromLong(ptr1 >= ptr2); } Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } #else static int pytalloc_default_cmp(PyObject *_obj1, PyObject *_obj2) { pytalloc_Object *obj1 = (pytalloc_Object *)_obj1, *obj2 = (pytalloc_Object *)_obj2; if (obj1->ob_type != obj2->ob_type) return ((char *)obj1->ob_type - (char *)obj2->ob_type); return ((char *)pytalloc_get_ptr(obj1) - (char *)pytalloc_get_ptr(obj2)); } #endif static PyTypeObject TallocObject_Type = { .tp_name = "talloc.Object", .tp_doc = "Python wrapper for a talloc-maintained object.", .tp_basicsize = sizeof(pytalloc_Object), .tp_dealloc = (destructor)pytalloc_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, .tp_repr = pytalloc_default_repr, #if PY_MAJOR_VERSION >= 3 .tp_richcompare = pytalloc_default_richcmp, #else .tp_compare = pytalloc_default_cmp, #endif }; /** * Default (but only slightly more useful than the default) implementation of Repr(). */ static PyObject *pytalloc_base_default_repr(PyObject *obj) { pytalloc_BaseObject *talloc_obj = (pytalloc_BaseObject *)obj; PyTypeObject *type = (PyTypeObject*)PyObject_Type(obj); return PyUnicode_FromFormat("<%s talloc based object at %p>", type->tp_name, talloc_obj->ptr); } /** * Simple dealloc for talloc-wrapping PyObjects */ static void pytalloc_base_dealloc(PyObject* self) { pytalloc_BaseObject *obj = (pytalloc_BaseObject *)self; assert(talloc_unlink(NULL, obj->talloc_ctx) != -1); obj->talloc_ctx = NULL; self->ob_type->tp_free(self); } /** * Default (but only slightly more useful than the default) implementation of cmp. 
*/ #if PY_MAJOR_VERSION >= 3 static PyObject *pytalloc_base_default_richcmp(PyObject *obj1, PyObject *obj2, int op) { void *ptr1; void *ptr2; if (Py_TYPE(obj1) == Py_TYPE(obj2)) { /* When types match, compare pointers */ ptr1 = pytalloc_get_ptr(obj1); ptr2 = pytalloc_get_ptr(obj2); } else if (PyObject_TypeCheck(obj2, &TallocObject_Type)) { /* Otherwise, compare types */ ptr1 = Py_TYPE(obj1); ptr2 = Py_TYPE(obj2); } else { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } switch (op) { case Py_EQ: return PyBool_FromLong(ptr1 == ptr2); case Py_NE: return PyBool_FromLong(ptr1 != ptr2); case Py_LT: return PyBool_FromLong(ptr1 < ptr2); case Py_GT: return PyBool_FromLong(ptr1 > ptr2); case Py_LE: return PyBool_FromLong(ptr1 <= ptr2); case Py_GE: return PyBool_FromLong(ptr1 >= ptr2); } Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } #else static int pytalloc_base_default_cmp(PyObject *_obj1, PyObject *_obj2) { pytalloc_BaseObject *obj1 = (pytalloc_BaseObject *)_obj1, *obj2 = (pytalloc_BaseObject *)_obj2; if (obj1->ob_type != obj2->ob_type) return ((char *)obj1->ob_type - (char *)obj2->ob_type); return ((char *)pytalloc_get_ptr(obj1) - (char *)pytalloc_get_ptr(obj2)); } #endif static PyTypeObject TallocBaseObject_Type = { .tp_name = "talloc.BaseObject", .tp_doc = "Python wrapper for a talloc-maintained object.", .tp_basicsize = sizeof(pytalloc_BaseObject), .tp_dealloc = (destructor)pytalloc_base_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, .tp_repr = pytalloc_base_default_repr, #if PY_MAJOR_VERSION >= 3 .tp_richcompare = pytalloc_base_default_richcmp, #else .tp_compare = pytalloc_base_default_cmp, #endif }; static PyTypeObject TallocGenericObject_Type = { .tp_name = "talloc.GenericObject", .tp_doc = "Python wrapper for a talloc-maintained object.", .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, .tp_base = &TallocBaseObject_Type, .tp_basicsize = sizeof(pytalloc_BaseObject), }; #define MODULE_DOC PyDoc_STR("Python wrapping of talloc-maintained objects.") #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, .m_name = "talloc", .m_doc = MODULE_DOC, .m_size = -1, .m_methods = talloc_methods, }; #endif static PyObject *module_init(void); static PyObject *module_init(void) { PyObject *m; if (PyType_Ready(&TallocObject_Type) < 0) return NULL; if (PyType_Ready(&TallocBaseObject_Type) < 0) return NULL; if (PyType_Ready(&TallocGenericObject_Type) < 0) return NULL; #if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&moduledef); #else m = Py_InitModule3("talloc", talloc_methods, MODULE_DOC); #endif if (m == NULL) return NULL; Py_INCREF(&TallocObject_Type); if (PyModule_AddObject(m, "Object", (PyObject *)&TallocObject_Type)) { goto err; } Py_INCREF(&TallocBaseObject_Type); if (PyModule_AddObject(m, "BaseObject", (PyObject *)&TallocBaseObject_Type)) { goto err; } Py_INCREF(&TallocGenericObject_Type); if (PyModule_AddObject(m, "GenericObject", (PyObject *)&TallocGenericObject_Type)) { goto err; } return m; err: Py_DECREF(m); return NULL; } #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit_talloc(void); PyMODINIT_FUNC PyInit_talloc(void) { return module_init(); } #else void inittalloc(void); void inittalloc(void) { module_init(); } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/talloc/pytalloc.h0000660000000000000000000000671500000000000017070 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. 
Samba utility functions Copyright (C) Jelmer Vernooij 2008 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef _PYTALLOC_H_ #define _PYTALLOC_H_ #include #include typedef struct { PyObject_HEAD TALLOC_CTX *talloc_ctx; void *ptr; /* eg the array element */ } pytalloc_Object; /* Return the PyTypeObject for pytalloc_Object. Returns a borrowed reference. */ _PUBLIC_ PyTypeObject *pytalloc_GetObjectType(void); /* Return the PyTypeObject for pytalloc_BaseObject. Returns a borrowed reference. */ _PUBLIC_ PyTypeObject *pytalloc_GetBaseObjectType(void); /* Check whether a specific object is a talloc Object. */ _PUBLIC_ int pytalloc_Check(PyObject *); _PUBLIC_ int pytalloc_BaseObject_check(PyObject *); _PUBLIC_ int _pytalloc_check_type(PyObject *py_obj, const char *type_name); #define pytalloc_check_type(py_obj, type) \ _pytalloc_check_type((PyObject *)(py_obj), #type) /* Retrieve the pointer for a pytalloc_object. Like talloc_get_type() * but for pytalloc_Objects. */ _PUBLIC_ void *_pytalloc_get_type(PyObject *py_obj, const char *type_name); #define pytalloc_get_type(py_obj, type) ((type *)_pytalloc_get_type((PyObject *)(py_obj), #type)) _PUBLIC_ void *_pytalloc_get_ptr(PyObject *py_obj); #define pytalloc_get_ptr(py_obj) _pytalloc_get_ptr((PyObject *)(py_obj)) _PUBLIC_ TALLOC_CTX *_pytalloc_get_mem_ctx(PyObject *py_obj); #define pytalloc_get_mem_ctx(py_obj) _pytalloc_get_mem_ctx((PyObject *)(py_obj)) _PUBLIC_ const char *_pytalloc_get_name(PyObject *py_obj); #define pytalloc_get_name(py_obj) _pytalloc_get_name((PyObject *)(py_obj)) _PUBLIC_ PyObject *pytalloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr); _PUBLIC_ PyObject *pytalloc_steal(PyTypeObject *py_type, void *ptr); _PUBLIC_ PyObject *pytalloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr); #define pytalloc_reference(py_type, talloc_ptr) pytalloc_reference_ex(py_type, talloc_ptr, talloc_ptr) #define pytalloc_new(type, typeobj) pytalloc_steal(typeobj, talloc_zero(NULL, type)) /* * Wrap a generic talloc pointer into a talloc.GenericObject, * this is a subclass of talloc.BaseObject. 
*/ _PUBLIC_ PyObject *pytalloc_GenericObject_steal_ex(TALLOC_CTX *mem_ctx, void *ptr); #define pytalloc_GenericObject_steal(talloc_ptr) \ pytalloc_GenericObject_steal_ex(talloc_ptr, talloc_ptr) _PUBLIC_ PyObject *pytalloc_GenericObject_reference_ex(TALLOC_CTX *mem_ctx, void *ptr); #define pytalloc_GenericObject_reference(talloc_ptr) \ pytalloc_GenericObject_reference_ex(talloc_ptr, talloc_ptr) _PUBLIC_ size_t pytalloc_BaseObject_size(void); _PUBLIC_ int pytalloc_BaseObject_PyType_Ready(PyTypeObject *type); #endif /* _PYTALLOC_H_ */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/pytalloc_guide.txt0000660000000000000000000002536500000000000020637 0ustar00rootroot00000000000000Using talloc in Samba4 ====================== .. contents:: Jelmer Vernooij August 2013 The most current version of this document is available at http://samba.org/ftp/unpacked/talloc/pytalloc_guide.txt pytalloc is a small library that provides glue for wrapping talloc-allocated objects from C in Python objects. What is pytalloc, and what is it not? ------------------------------------- pytalloc is merely a helper library - it provides a convenient base type object for objects that wrap talloc-maintained memory in C. It won't write your bindings for you but it will make it easier to write C bindings that involve talloc, and take away some of the boiler plate. Python 3 -------- pytalloc can be used with Python 3. Usage from Python extension remains the same, but for the C utilities, the library to link to is tagged with Python's PEP3149 ABI tag, for example "pytalloc.cpython34m". To make a build for Python 3, configure with PYTHON=/usr/bin/python3. . =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- pytalloc_Object / pytalloc_BaseObject This is the new base class that all Python objects that wrap talloc pointers derive from. It is itself a subclass of the "Object" type that all objects in Python derive from. Note that you will almost never create objects of the pytalloc_Object type itself, as they are just opaque pointers that can not be accessed from Python. A common pattern is other objects that subclass pytalloc_Object and rely on it for their memory management. Each `pytalloc_Object` wraps two core of information - a talloc context and a pointer. The pointer is the actual data that is wrapped. The talloc context is used for memory management purposes only; when the wrapping Python object goes away, it unlinks the talloc context. The talloc context pointer and the ptr can (and often do) have the same value. Each pytalloc_Object has a custom __repr__ implementation that describes that it is a talloc object and the location of the pointer it is wrapping. it also has a custom __cmp__/__eq__/__neq__ method that compares the pointers the object is wrapping rather than the objects themselves (since there can be multiple objects that wrap the same talloc pointer). It is preferred to use pytalloc_BaseObject as this implementation exposes less in the C ABI and correctly supports pointers in C arrays in the way needed by PIDL. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyTypeObject *pytalloc_GetObjectType(void) Obtain a pointer to the PyTypeObject for `pytalloc_Object`. The reference counter for the object will be NOT incremented, so the caller MUST NOT decrement it when it no longer needs it (eg by using `Py_DECREF`). 
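As a concrete (and hedged) illustration of the borrowed-reference rule above, a C extension might gate access to the wrapped pointer like this. The helper name wraps_talloc() is invented, and the include line assumes an installed pytalloc header (inside the Samba tree it is included as "pytalloc.h"):

#include <Python.h>
#include <pytalloc.h>

/* Return 1 if obj derives from talloc.Object, 0 if not, -1 on import error. */
static int wraps_talloc(PyObject *obj)
{
        PyTypeObject *tp = pytalloc_GetObjectType();

        if (tp == NULL) {
                return -1;      /* importing the "talloc" module failed */
        }
        /* Do NOT Py_DECREF(tp): the reference count was not incremented for us. */
        return PyObject_TypeCheck(obj, tp) ? 1 : 0;
}

pytalloc_Check(), documented below, performs the same test and is usually what you want; this sketch only spells out the reference-counting rule.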
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyTypeObject *pytalloc_GetBaseObjectType(void) Obtain a pointer to the PyTypeObject for `pytalloc_BaseObject`. The reference counter for the object will be NOT incremented, so the caller MUST NOT decrement it when it no longer needs it (eg by using `Py_DECREF`). =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- int pytalloc_BaseObject_PyType_Ready(PyTypeObject *type); Wrapper for PyType_Ready() that will set the correct values into the PyTypeObject to create a BaseObject =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=- int pytalloc_Check(PyObject *) Check whether a specific object is a talloc Object. Returns non-zero if it is a pytalloc_Object and zero otherwise. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=- int pytalloc_BaseObject_Check(PyObject *) Check whether a specific object is a talloc BaseObject. Returns non-zero if it is a pytalloc_BaseObject and zero otherwise. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- int pytalloc_check_type(PyObject *py_obj, type) Check if the object based on `pytalloc_*Object` py_obj. type should be a C type, similar to a type passed to `talloc_get_type`. This can be used as a check before using pytalloc_get_type() or an alternative codepath. Returns non-zero if it is an object of the expected type and zero otherwise. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- type *pytalloc_get_type(PyObject *py_obj, type) Retrieve the pointer from a `pytalloc_Object` py_obj. type should be a C type, similar to a type passed to `talloc_get_type`. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- pytalloc_get_ptr(PyObject *py_obj) Retrieve the pointer from a `pytalloc_Object` or `pytalloc_BaseObject` py_obj. There is no type checking - use `pytalloc_get_type` if possible. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- TALLOC_CTX *pytalloc_get_mem_ctx(PyObject *py_obj) Retrieve the talloc context associated with a pytalloc_Object or pytalloc_BaseObject. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr) Create a new Python wrapping object for a talloc pointer and context, with py_type as associated Python sub type object. This typically used when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element. `pytalloc_get_ptr()` can be used to get the pointer out of the object again. This will *not* increment the reference counter for the talloc context, so the caller should make sure such an increment has happened. When the Python object goes away, it will unreference the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_steal(PyTypeObject *py_type, void *ptr) Create a new Python wrapping object for a talloc pointer and context, with py_type as associated Python sub type object. The pointer will also be used as the talloc context. `pytalloc_get_type()` can be used to get the pointer out of the object again. This will *not* increment the reference counter for the talloc context, so the caller should make sure such an increment has happened. When the Python object goes away, it will unreference the talloc context. 
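Combining pytalloc_GetBaseObjectType(), pytalloc_steal() and pytalloc_get_type() from the sections above, here is a minimal hedged sketch of a binding function. struct my_record, py_record_new and the include path are invented/assumed; a real binding would normally register its own subtype via pytalloc_BaseObject_PyType_Ready() rather than instantiating the bare talloc.BaseObject type, which is done here only to keep the sketch short:

#include <Python.h>
#include <talloc.h>
#include <pytalloc.h>

struct my_record {              /* hypothetical example type */
        int id;
};

static PyObject *py_record_new(PyObject *module, PyObject *args)
{
        /* borrowed-style reference, no Py_DECREF (see above) */
        PyTypeObject *tp = pytalloc_GetBaseObjectType();
        struct my_record *r;

        if (tp == NULL) {
                return NULL;
        }

        r = talloc_zero(NULL, struct my_record);  /* named "struct my_record" */
        if (r == NULL) {
                return PyErr_NoMemory();
        }

        /*
         * The wrapper takes over 'r': when the Python object is garbage
         * collected, its internal talloc context is unlinked and 'r' is
         * freed. Later, a method can recover the pointer with type checking:
         *
         *     struct my_record *rec = pytalloc_get_type(py_obj, struct my_record);
         */
        return pytalloc_steal(tp, r);
}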
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr) Create a new Python wrapping object for a talloc pointer and context, with py_type as associated Python sub type object. This typically used when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element. `pytalloc_get_ptr()` can be used to get the pointer out of the object again. This will increment the reference counter for the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_reference(PyTypeObject *py_type, void *talloc_ptr) Create a new Python wrapping object for a talloc pointer, with py_type as associated Python sub type object. The pointer will also be used as the talloc context. `pytalloc_get_type()` can be used to get the pointer out of the object again. This will increment the reference counter for the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_new(type, PyTypeObject *typeobj) Create a new, empty pytalloc_Object with the specified Python type object. type should be a C type, similar to talloc_new(). =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_GenericObject_steal_ex(void *ptr) Create a new Python wrapping object for a generic talloc pointer, as sub type of `pytalloc_BaseObject`. This typically used when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element. `pytalloc_get_ptr()` can be used to get the pointer out of the object again. This will *not* increment the reference counter for the talloc context, so the caller should make sure such an increment has happened. When the Python object goes away, it will unreference the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_GenericObject_steal(void *ptr) Create a new Python wrapping object for a generic talloc pointer, as sub type of `pytalloc_BaseObject`. The pointer will also be used as the talloc context. `pytalloc_get_type()` can be used to get the pointer out of the object again. This will *not* increment the reference counter for the talloc context, so the caller should make sure such an increment has happened. When the Python object goes away, it will unreference the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_GenericObject_reference_ex(void *ptr) Create a new Python wrapping object for a generic talloc pointer, as sub type of `pytalloc_BaseObject`. This typically used when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element. `pytalloc_get_ptr()` can be used to get the pointer out of the object again. This will increment the reference counter for the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- PyObject *pytalloc_GenericObject_reference(void *ptr) Create a new Python wrapping object for a generic talloc pointer, as sub type of `pytalloc_BaseObject`. The pointer will also be used as the talloc context. `pytalloc_get_type()` can be used to get the pointer out of the object again. This will increment the reference counter for the talloc context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- DEPRECATED! PyObject *pytalloc_CObject_FromTallocPtr(void *); Create a new pytalloc_Object for an abitrary talloc-maintained C pointer. 
This will use a generic VoidPtr Python type, which just provides an opaque object in Python. The caller is responsible for incrementing the talloc reference count before calling this function - it will dereference the talloc pointer when it is garbage collected. This function is deprecated and only available on Python 2. Use pytalloc_GenericObject_{reference,steal}[_ex]() instead. Debug function for talloc in Python ----------------------------------- The "talloc" module in Python provides a couple of functions that can be used to debug issues with objects wrapped by pytalloc. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- report_full(obj?) Print a full report on a specific object or on all allocated objects by Python. Same behaviour as the `talloc_report_full()` function that is provided by C talloc. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- enable_null_tracking() This enables tracking of the NULL memory context without enabling leak reporting on exit. Useful for when you want to do your own leak reporting call via talloc_report_null_full(). This must be done in the top level script, not an imported module. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- pytalloc_total_blocks(obj?) Return the talloc block count for all allocated objects or a specific object if specified. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0092056 tevent-0.11.0/lib/talloc/pytalloc_private.h0000660000000000000000000000175600000000000020622 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Samba utility functions Copyright (C) Jelmer Vernooij 2008 Copyright (C) Andrew Bartlett 2016 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ typedef struct { PyObject_HEAD TALLOC_CTX *talloc_ctx; TALLOC_CTX *talloc_ptr_ctx; /* eg the start of the array */ void *ptr; /* eg the array element */ } pytalloc_BaseObject; ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1396027 tevent-0.11.0/lib/talloc/pytalloc_util.c0000660000000000000000000001753200000000000020117 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Python/Talloc glue Copyright (C) Jelmer Vernooij 2008 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include #include "replace.h" #include #include "pytalloc.h" #include #include "pytalloc_private.h" static PyObject *pytalloc_steal_or_reference(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr, bool steal); _PUBLIC_ PyTypeObject *pytalloc_GetObjectType(void) { static PyTypeObject *type = NULL; PyObject *mod; mod = PyImport_ImportModule("talloc"); if (mod == NULL) { return NULL; } type = (PyTypeObject *)PyObject_GetAttrString(mod, "Object"); Py_DECREF(mod); return type; } _PUBLIC_ PyTypeObject *pytalloc_GetBaseObjectType(void) { static PyTypeObject *type = NULL; PyObject *mod; mod = PyImport_ImportModule("talloc"); if (mod == NULL) { return NULL; } type = (PyTypeObject *)PyObject_GetAttrString(mod, "BaseObject"); Py_DECREF(mod); return type; } static PyTypeObject *pytalloc_GetGenericObjectType(void) { static PyTypeObject *type = NULL; PyObject *mod; mod = PyImport_ImportModule("talloc"); if (mod == NULL) { return NULL; } type = (PyTypeObject *)PyObject_GetAttrString(mod, "GenericObject"); Py_DECREF(mod); return type; } /** * Import an existing talloc pointer into a Python object. */ _PUBLIC_ PyObject *pytalloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr) { return pytalloc_steal_or_reference(py_type, mem_ctx, ptr, true); } /** * Import an existing talloc pointer into a Python object. */ _PUBLIC_ PyObject *pytalloc_steal(PyTypeObject *py_type, void *ptr) { return pytalloc_steal_or_reference(py_type, ptr, ptr, true); } /** * Import an existing talloc pointer into a Python object, leaving the * original parent, and creating a reference to the object in the python * object. * * We remember the object we hold the reference to (a * possibly-non-talloc pointer), the existing parent (typically the * start of the array) and the new referenced parent. That way we can * cope with the fact that we will have multiple parents, one per time * python sees the object. */ _PUBLIC_ PyObject *pytalloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr) { return pytalloc_steal_or_reference(py_type, mem_ctx, ptr, false); } /** * Internal function that either steals or referecences the talloc * pointer into a new talloc context. 
*/ static PyObject *pytalloc_steal_or_reference(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr, bool steal) { bool ok = false; TALLOC_CTX *talloc_ctx = NULL; bool is_baseobject = false; PyObject *obj = NULL; PyTypeObject *BaseObjectType = NULL, *ObjectType = NULL; BaseObjectType = pytalloc_GetBaseObjectType(); if (BaseObjectType == NULL) { goto err; } ObjectType = pytalloc_GetObjectType(); if (ObjectType == NULL) { goto err; } /* this should have been tested by caller */ if (mem_ctx == NULL) { return PyErr_NoMemory(); } is_baseobject = PyType_IsSubtype(py_type, BaseObjectType); if (!is_baseobject) { if (!PyType_IsSubtype(py_type, ObjectType)) { PyErr_SetString(PyExc_TypeError, "Expected type based on talloc"); return NULL; } } obj = py_type->tp_alloc(py_type, 0); if (obj == NULL) { goto err; } talloc_ctx = talloc_new(NULL); if (talloc_ctx == NULL) { PyErr_NoMemory(); goto err; } if (steal) { ok = (talloc_steal(talloc_ctx, mem_ctx) != NULL); } else { ok = (talloc_reference(talloc_ctx, mem_ctx) != NULL); } if (!ok) { goto err; } talloc_set_name_const(talloc_ctx, py_type->tp_name); if (is_baseobject) { pytalloc_BaseObject *ret = (pytalloc_BaseObject*)obj; ret->talloc_ctx = talloc_ctx; ret->talloc_ptr_ctx = mem_ctx; ret->ptr = ptr; } else { pytalloc_Object *ret = (pytalloc_Object*)obj; ret->talloc_ctx = talloc_ctx; ret->ptr = ptr; } return obj; err: TALLOC_FREE(talloc_ctx); Py_XDECREF(obj); return NULL; } /* * Wrap a generic talloc pointer into a talloc.GenericObject, * this is a subclass of talloc.BaseObject. */ _PUBLIC_ PyObject *pytalloc_GenericObject_steal_ex(TALLOC_CTX *mem_ctx, void *ptr) { PyTypeObject *tp = pytalloc_GetGenericObjectType(); return pytalloc_steal_ex(tp, mem_ctx, ptr); } /* * Wrap a generic talloc pointer into a talloc.GenericObject, * this is a subclass of talloc.BaseObject. 
*/ _PUBLIC_ PyObject *pytalloc_GenericObject_reference_ex(TALLOC_CTX *mem_ctx, void *ptr) { PyTypeObject *tp = pytalloc_GetGenericObjectType(); return pytalloc_reference_ex(tp, mem_ctx, ptr); } _PUBLIC_ int pytalloc_Check(PyObject *obj) { PyTypeObject *tp = pytalloc_GetObjectType(); return PyObject_TypeCheck(obj, tp); } _PUBLIC_ int pytalloc_BaseObject_check(PyObject *obj) { PyTypeObject *tp = pytalloc_GetBaseObjectType(); return PyObject_TypeCheck(obj, tp); } _PUBLIC_ size_t pytalloc_BaseObject_size(void) { return sizeof(pytalloc_BaseObject); } static void *_pytalloc_get_checked_type(PyObject *py_obj, const char *type_name, bool check_only, const char *function) { TALLOC_CTX *mem_ctx; void *ptr = NULL; void *type_obj; mem_ctx = _pytalloc_get_mem_ctx(py_obj); ptr = _pytalloc_get_ptr(py_obj); if (mem_ctx != ptr || ptr == NULL) { if (check_only) { return NULL; } PyErr_Format(PyExc_TypeError, "%s: expected %s, " "but the pointer is no talloc pointer, " "pytalloc_get_ptr() would get the raw pointer.", function, type_name); return NULL; } type_obj = talloc_check_name(ptr, type_name); if (type_obj == NULL) { const char *name = NULL; if (check_only) { return NULL; } name = talloc_get_name(ptr); PyErr_Format(PyExc_TypeError, "%s: expected %s, got %s", function, type_name, name); return NULL; } return ptr; } _PUBLIC_ int _pytalloc_check_type(PyObject *py_obj, const char *type_name) { void *ptr = NULL; ptr = _pytalloc_get_checked_type(py_obj, type_name, true, /* check_only */ "pytalloc_check_type"); if (ptr == NULL) { return 0; } return 1; } _PUBLIC_ void *_pytalloc_get_type(PyObject *py_obj, const char *type_name) { return _pytalloc_get_checked_type(py_obj, type_name, false, /* not check_only */ "pytalloc_get_type"); } _PUBLIC_ void *_pytalloc_get_ptr(PyObject *py_obj) { if (pytalloc_BaseObject_check(py_obj)) { return ((pytalloc_BaseObject *)py_obj)->ptr; } if (pytalloc_Check(py_obj)) { return ((pytalloc_Object *)py_obj)->ptr; } return NULL; } _PUBLIC_ TALLOC_CTX *_pytalloc_get_mem_ctx(PyObject *py_obj) { if (pytalloc_BaseObject_check(py_obj)) { return ((pytalloc_BaseObject *)py_obj)->talloc_ptr_ctx; } if (pytalloc_Check(py_obj)) { return ((pytalloc_Object *)py_obj)->talloc_ctx; } return NULL; } _PUBLIC_ int pytalloc_BaseObject_PyType_Ready(PyTypeObject *type) { PyTypeObject *talloc_type = pytalloc_GetBaseObjectType(); if (talloc_type == NULL) { return -1; } type->tp_base = talloc_type; type->tp_basicsize = pytalloc_BaseObject_size(); return PyType_Ready(type); } _PUBLIC_ const char *_pytalloc_get_name(PyObject *obj) { void *ptr = pytalloc_get_ptr(obj); if (ptr == NULL) { return "non-talloc object"; } return talloc_get_name(ptr); } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/talloc/talloc.c0000660000000000000000000021575700000000000016522 0ustar00rootroot00000000000000/* Samba Unix SMB/CIFS implementation. Samba trivial allocation library - new interface NOTE: Please read talloc_guide.txt for full documentation Copyright (C) Andrew Tridgell 2004 Copyright (C) Stefan Metzmacher 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ /* inspired by http://swapped.cc/halloc/ */ #include "replace.h" #include "talloc.h" #ifdef HAVE_SYS_AUXV_H #include #endif #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR) #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR" #endif #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR) #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR" #endif /* Special macros that are no-ops except when run under Valgrind on * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */ #ifdef HAVE_VALGRIND_MEMCHECK_H /* memcheck.h includes valgrind.h */ #include #elif defined(HAVE_VALGRIND_H) #include #endif #define MAX_TALLOC_SIZE 0x10000000 #define TALLOC_FLAG_FREE 0x01 #define TALLOC_FLAG_LOOP 0x02 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */ #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */ /* * Bits above this are random, used to make it harder to fake talloc * headers during an attack. Try not to change this without good reason. */ #define TALLOC_FLAG_MASK 0x0F #define TALLOC_MAGIC_REFERENCE ((const char *)1) #define TALLOC_MAGIC_BASE 0xe814ec70 #define TALLOC_MAGIC_NON_RANDOM ( \ ~TALLOC_FLAG_MASK & ( \ TALLOC_MAGIC_BASE + \ (TALLOC_BUILD_VERSION_MAJOR << 24) + \ (TALLOC_BUILD_VERSION_MINOR << 16) + \ (TALLOC_BUILD_VERSION_RELEASE << 8))) static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM; /* by default we abort when given a bad pointer (such as when talloc_free() is called on a pointer that came from malloc() */ #ifndef TALLOC_ABORT #define TALLOC_ABORT(reason) abort() #endif #ifndef discard_const_p #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T) # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr))) #else # define discard_const_p(type, ptr) ((type *)(ptr)) #endif #endif /* these macros gain us a few percent of speed on gcc */ #if (__GNUC__ >= 3) /* the strange !! 
is to ensure that __builtin_expect() takes either 0 or 1 as its first argument */ #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #else #ifndef likely #define likely(x) (x) #endif #ifndef unlikely #define unlikely(x) (x) #endif #endif /* this null_context is only used if talloc_enable_leak_report() or talloc_enable_leak_report_full() is called, otherwise it remains NULL */ static void *null_context; static bool talloc_report_null; static bool talloc_report_null_full; static void *autofree_context; static void talloc_setup_atexit(void); /* used to enable fill of memory on free, which can be useful for * catching use after free errors when valgrind is too slow */ static struct { bool initialised; bool enabled; uint8_t fill_value; } talloc_fill; #define TALLOC_FILL_ENV "TALLOC_FREE_FILL" /* * do not wipe the header, to allow the * double-free logic to still work */ #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \ if (unlikely(talloc_fill.enabled)) { \ size_t _flen = (_tc)->size; \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ memset(_fptr, talloc_fill.fill_value, _flen); \ } \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) /* Mark the whole chunk as not accessable */ #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \ size_t _flen = TC_HDR_SIZE + (_tc)->size; \ char *_fptr = (char *)(_tc); \ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \ } while(0) #else #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0) #endif #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \ TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \ TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \ } while (0) #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \ if (unlikely(talloc_fill.enabled)) { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ memset(_fptr, talloc_fill.fill_value, _flen); \ } \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) /* Mark the unused bytes not accessable */ #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \ } while (0) #else #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) #endif #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \ TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \ TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \ } while (0) #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \ if (unlikely(talloc_fill.enabled)) { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ memset(_fptr, talloc_fill.fill_value, _flen); \ } \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) /* Mark the unused bytes as undefined */ #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \ } while (0) #else #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) #endif #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \ TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \ TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) /* Mark the new 
bytes as undefined */ #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \ size_t _old_used = TC_HDR_SIZE + (_tc)->size; \ size_t _new_used = TC_HDR_SIZE + (_new_size); \ size_t _flen = _new_used - _old_used; \ char *_fptr = _old_used + (char *)(_tc); \ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \ } while (0) #else #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) #endif #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \ TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \ } while (0) struct talloc_reference_handle { struct talloc_reference_handle *next, *prev; void *ptr; const char *location; }; struct talloc_memlimit { struct talloc_chunk *parent; struct talloc_memlimit *upper; size_t max_size; size_t cur_size; }; static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size); static inline void talloc_memlimit_grow(struct talloc_memlimit *limit, size_t size); static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit, size_t size); static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc); static inline void _tc_set_name_const(struct talloc_chunk *tc, const char *name); static struct talloc_chunk *_vasprintf_tc(const void *t, const char *fmt, va_list ap); typedef int (*talloc_destructor_t)(void *); struct talloc_pool_hdr; struct talloc_chunk { /* * flags includes the talloc magic, which is randomised to * make overwrite attacks harder */ unsigned flags; /* * If you have a logical tree like: * * * / | \ * / | \ * / | \ * * * The actual talloc tree is: * * * | * - - * * The children are linked with next/prev pointers, and * child 1 is linked to the parent with parent/child * pointers. */ struct talloc_chunk *next, *prev; struct talloc_chunk *parent, *child; struct talloc_reference_handle *refs; talloc_destructor_t destructor; const char *name; size_t size; /* * limit semantics: * if 'limit' is set it means all *new* children of the context will * be limited to a total aggregate size ox max_size for memory * allocations. * cur_size is used to keep track of the current use */ struct talloc_memlimit *limit; /* * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool" * is a pointer to the struct talloc_chunk of the pool that it was * allocated from. This way children can quickly find the pool to chew * from. */ struct talloc_pool_hdr *pool; }; union talloc_chunk_cast_u { uint8_t *ptr; struct talloc_chunk *chunk; }; /* 16 byte alignment seems to keep everyone happy */ #define TC_ALIGN16(s) (((s)+15)&~15) #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk)) #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc)) _PUBLIC_ int talloc_version_major(void) { return TALLOC_VERSION_MAJOR; } _PUBLIC_ int talloc_version_minor(void) { return TALLOC_VERSION_MINOR; } _PUBLIC_ int talloc_test_get_magic(void) { return talloc_magic; } static inline void _talloc_chunk_set_free(struct talloc_chunk *tc, const char *location) { /* * Mark this memory as free, and also over-stamp the talloc * magic with the old-style magic. * * Why? This tries to avoid a memory read use-after-free from * disclosing our talloc magic, which would then allow an * attacker to prepare a valid header and so run a destructor. * */ tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE | (tc->flags & TALLOC_FLAG_MASK); /* we mark the freed memory with where we called the free * from. 
This means on a double free error we can report where * the first free came from */ if (location) { tc->name = location; } } static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc) { /* * Mark this memory as not free. * * Why? This is memory either in a pool (and so available for * talloc's re-use or after the realloc(). We need to mark * the memory as free() before any realloc() call as we can't * write to the memory after that. * * We put back the normal magic instead of the 'not random' * magic. */ tc->flags = talloc_magic | ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE); } static void (*talloc_log_fn)(const char *message); _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message)) { talloc_log_fn = log_fn; } #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE #define CONSTRUCTOR __attribute__((constructor)) #elif defined(HAVE_PRAGMA_INIT) #define CONSTRUCTOR #pragma init (talloc_lib_init) #endif #if defined(HAVE_CONSTRUCTOR_ATTRIBUTE) || defined(HAVE_PRAGMA_INIT) void talloc_lib_init(void) CONSTRUCTOR; void talloc_lib_init(void) { uint32_t random_value; #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM) uint8_t *p; /* * Use the kernel-provided random values used for * ASLR. This won't change per-exec, which is ideal for us */ p = (uint8_t *) getauxval(AT_RANDOM); if (p) { /* * We get 16 bytes from getauxval. By calling rand(), * a totally insecure PRNG, but one that will * deterministically have a different value when called * twice, we ensure that if two talloc-like libraries * are somehow loaded in the same address space, that * because we choose different bytes, we will keep the * protection against collision of multiple talloc * libs. * * This protection is important because the effects of * passing a talloc pointer from one to the other may * be very hard to determine. */ int offset = rand() % (16 - sizeof(random_value)); memcpy(&random_value, p + offset, sizeof(random_value)); } else #endif { /* * Otherwise, hope the location we are loaded in * memory is randomised by someone else */ random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF); } talloc_magic = random_value & ~TALLOC_FLAG_MASK; } #else #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available" #endif static void talloc_lib_atexit(void) { TALLOC_FREE(autofree_context); if (talloc_total_size(null_context) == 0) { return; } if (talloc_report_null_full) { talloc_report_full(null_context, stderr); } else if (talloc_report_null) { talloc_report(null_context, stderr); } } static void talloc_setup_atexit(void) { static bool done; if (done) { return; } atexit(talloc_lib_atexit); done = true; } static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2); static void talloc_log(const char *fmt, ...) 
{ va_list ap; char *message; if (!talloc_log_fn) { return; } va_start(ap, fmt); message = talloc_vasprintf(NULL, fmt, ap); va_end(ap); talloc_log_fn(message); talloc_free(message); } static void talloc_log_stderr(const char *message) { fprintf(stderr, "%s", message); } _PUBLIC_ void talloc_set_log_stderr(void) { talloc_set_log_fn(talloc_log_stderr); } static void (*talloc_abort_fn)(const char *reason); _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason)) { talloc_abort_fn = abort_fn; } static void talloc_abort(const char *reason) { talloc_log("%s\n", reason); if (!talloc_abort_fn) { TALLOC_ABORT(reason); } talloc_abort_fn(reason); } static void talloc_abort_access_after_free(void) { talloc_abort("Bad talloc magic value - access after free"); } static void talloc_abort_unknown_value(void) { talloc_abort("Bad talloc magic value - unknown value"); } /* panic if we get a bad magic value */ static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr) { const char *pp = (const char *)ptr; struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE); if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) { if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) { talloc_log("talloc: access after free error - first free may be at %s\n", tc->name); talloc_abort_access_after_free(); return NULL; } talloc_abort_unknown_value(); return NULL; } return tc; } /* hook into the front of the list */ #define _TLIST_ADD(list, p) \ do { \ if (!(list)) { \ (list) = (p); \ (p)->next = (p)->prev = NULL; \ } else { \ (list)->prev = (p); \ (p)->next = (list); \ (p)->prev = NULL; \ (list) = (p); \ }\ } while (0) /* remove an element from a list - element doesn't have to be in list. */ #define _TLIST_REMOVE(list, p) \ do { \ if ((p) == (list)) { \ (list) = (p)->next; \ if (list) (list)->prev = NULL; \ } else { \ if ((p)->prev) (p)->prev->next = (p)->next; \ if ((p)->next) (p)->next->prev = (p)->prev; \ } \ if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \ } while (0) /* return the parent chunk of a pointer */ static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return NULL; } tc = talloc_chunk_from_ptr(ptr); while (tc->prev) tc=tc->prev; return tc->parent; } _PUBLIC_ void *talloc_parent(const void *ptr) { struct talloc_chunk *tc = talloc_parent_chunk(ptr); return tc? TC_PTR_FROM_CHUNK(tc) : NULL; } /* find parents name */ _PUBLIC_ const char *talloc_parent_name(const void *ptr) { struct talloc_chunk *tc = talloc_parent_chunk(ptr); return tc? tc->name : NULL; } /* A pool carries an in-pool object count count in the first 16 bytes. bytes. This is done to support talloc_steal() to a parent outside of the pool. The count includes the pool itself, so a talloc_free() on a pool will only destroy the pool if the count has dropped to zero. A talloc_free() of a pool member will reduce the count, and eventually also call free(3) on the pool memory. The object count is not put into "struct talloc_chunk" because it is only relevant for talloc pools and the alignment to 16 bytes would increase the memory footprint of each talloc chunk by those 16 bytes. 
*/ struct talloc_pool_hdr { void *end; unsigned int object_count; size_t poolsize; }; union talloc_pool_hdr_cast_u { uint8_t *ptr; struct talloc_pool_hdr *hdr; }; #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr)) static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c) { union talloc_chunk_cast_u tcc = { .chunk = c }; union talloc_pool_hdr_cast_u tphc = { tcc.ptr - TP_HDR_SIZE }; return tphc.hdr; } static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h) { union talloc_pool_hdr_cast_u tphc = { .hdr = h }; union talloc_chunk_cast_u tcc = { .ptr = tphc.ptr + TP_HDR_SIZE }; return tcc.chunk; } static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr) { struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr); return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize; } static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr) { return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end; } /* If tc is inside a pool, this gives the next neighbour. */ static inline void *tc_next_chunk(struct talloc_chunk *tc) { return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size); } static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr) { struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr); return tc_next_chunk(tc); } /* Mark the whole remaining pool as not accessable */ static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr) { size_t flen = tc_pool_space_left(pool_hdr); if (unlikely(talloc_fill.enabled)) { memset(pool_hdr->end, talloc_fill.fill_value, flen); } #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen); #endif } /* Allocate from a pool */ static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent, size_t size, size_t prefix_len) { struct talloc_pool_hdr *pool_hdr = NULL; union talloc_chunk_cast_u tcc; size_t space_left; struct talloc_chunk *result; size_t chunk_size; if (parent == NULL) { return NULL; } if (parent->flags & TALLOC_FLAG_POOL) { pool_hdr = talloc_pool_from_chunk(parent); } else if (parent->flags & TALLOC_FLAG_POOLMEM) { pool_hdr = parent->pool; } if (pool_hdr == NULL) { return NULL; } space_left = tc_pool_space_left(pool_hdr); /* * Align size to 16 bytes */ chunk_size = TC_ALIGN16(size + prefix_len); if (space_left < chunk_size) { return NULL; } tcc = (union talloc_chunk_cast_u) { .ptr = ((uint8_t *)pool_hdr->end) + prefix_len }; result = tcc.chunk; #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size); #endif pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size); result->flags = talloc_magic | TALLOC_FLAG_POOLMEM; result->pool = pool_hdr; pool_hdr->object_count++; return result; } /* Allocate a bit of memory as a child of an existing pointer */ static inline void *__talloc_with_prefix(const void *context, size_t size, size_t prefix_len, struct talloc_chunk **tc_ret) { struct talloc_chunk *tc = NULL; struct talloc_memlimit *limit = NULL; size_t total_len = TC_HDR_SIZE + size + prefix_len; struct talloc_chunk *parent = NULL; if (unlikely(context == NULL)) { context = null_context; } if (unlikely(size >= MAX_TALLOC_SIZE)) { return NULL; } if (unlikely(total_len < TC_HDR_SIZE)) { return NULL; } if (likely(context != NULL)) { parent = talloc_chunk_from_ptr(context); if (parent->limit != NULL) { limit = parent->limit; } tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len); } if (tc == NULL) { uint8_t *ptr = 
NULL; union talloc_chunk_cast_u tcc; /* * Only do the memlimit check/update on actual allocation. */ if (!talloc_memlimit_check(limit, total_len)) { errno = ENOMEM; return NULL; } ptr = malloc(total_len); if (unlikely(ptr == NULL)) { return NULL; } tcc = (union talloc_chunk_cast_u) { .ptr = ptr + prefix_len }; tc = tcc.chunk; tc->flags = talloc_magic; tc->pool = NULL; talloc_memlimit_grow(limit, total_len); } tc->limit = limit; tc->size = size; tc->destructor = NULL; tc->child = NULL; tc->name = NULL; tc->refs = NULL; if (likely(context != NULL)) { if (parent->child) { parent->child->parent = NULL; tc->next = parent->child; tc->next->prev = tc; } else { tc->next = NULL; } tc->parent = parent; tc->prev = NULL; parent->child = tc; } else { tc->next = tc->prev = tc->parent = NULL; } *tc_ret = tc; return TC_PTR_FROM_CHUNK(tc); } static inline void *__talloc(const void *context, size_t size, struct talloc_chunk **tc) { return __talloc_with_prefix(context, size, 0, tc); } /* * Create a talloc pool */ static inline void *_talloc_pool(const void *context, size_t size) { struct talloc_chunk *tc; struct talloc_pool_hdr *pool_hdr; void *result; result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc); if (unlikely(result == NULL)) { return NULL; } pool_hdr = talloc_pool_from_chunk(tc); tc->flags |= TALLOC_FLAG_POOL; tc->size = 0; pool_hdr->object_count = 1; pool_hdr->end = result; pool_hdr->poolsize = size; tc_invalidate_pool(pool_hdr); return result; } _PUBLIC_ void *talloc_pool(const void *context, size_t size) { return _talloc_pool(context, size); } /* * Create a talloc pool correctly sized for a basic size plus * a number of subobjects whose total size is given. Essentially * a custom allocator for talloc to reduce fragmentation. */ _PUBLIC_ void *_talloc_pooled_object(const void *ctx, size_t type_size, const char *type_name, unsigned num_subobjects, size_t total_subobjects_size) { size_t poolsize, subobjects_slack, tmp; struct talloc_chunk *tc; struct talloc_pool_hdr *pool_hdr; void *ret; poolsize = type_size + total_subobjects_size; if ((poolsize < type_size) || (poolsize < total_subobjects_size)) { goto overflow; } if (num_subobjects == UINT_MAX) { goto overflow; } num_subobjects += 1; /* the object body itself */ /* * Alignment can increase the pool size by at most 15 bytes per object * plus alignment for the object itself */ subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects; if (subobjects_slack < num_subobjects) { goto overflow; } tmp = poolsize + subobjects_slack; if ((tmp < poolsize) || (tmp < subobjects_slack)) { goto overflow; } poolsize = tmp; ret = _talloc_pool(ctx, poolsize); if (ret == NULL) { return NULL; } tc = talloc_chunk_from_ptr(ret); tc->size = type_size; pool_hdr = talloc_pool_from_chunk(tc); #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size); #endif pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size)); _tc_set_name_const(tc, type_name); return ret; overflow: return NULL; } /* setup a destructor to be called on free of a pointer the destructor should return 0 on success, or -1 on failure. if the destructor fails then the free is failed, and the memory can be continued to be used */ _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *)) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); tc->destructor = destructor; } /* increase the reference count on a piece of memory. 
*/ _PUBLIC_ int talloc_increase_ref_count(const void *ptr) { if (unlikely(!talloc_reference(null_context, ptr))) { return -1; } return 0; } /* helper for talloc_reference() this is referenced by a function pointer and should not be inline */ static int talloc_reference_destructor(struct talloc_reference_handle *handle) { struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr); _TLIST_REMOVE(ptr_tc->refs, handle); return 0; } /* more efficient way to add a name to a pointer - the name must point to a true string constant */ static inline void _tc_set_name_const(struct talloc_chunk *tc, const char *name) { tc->name = name; } /* internal talloc_named_const() */ static inline void *_talloc_named_const(const void *context, size_t size, const char *name) { void *ptr; struct talloc_chunk *tc; ptr = __talloc(context, size, &tc); if (unlikely(ptr == NULL)) { return NULL; } _tc_set_name_const(tc, name); return ptr; } /* make a secondary reference to a pointer, hanging off the given context. the pointer remains valid until both the original caller and this given context are freed. the major use for this is when two different structures need to reference the same underlying data, and you want to be able to free the two instances separately, and in either order */ _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location) { struct talloc_chunk *tc; struct talloc_reference_handle *handle; if (unlikely(ptr == NULL)) return NULL; tc = talloc_chunk_from_ptr(ptr); handle = (struct talloc_reference_handle *)_talloc_named_const(context, sizeof(struct talloc_reference_handle), TALLOC_MAGIC_REFERENCE); if (unlikely(handle == NULL)) return NULL; /* note that we hang the destructor off the handle, not the main context as that allows the caller to still setup their own destructor on the context if they want to */ talloc_set_destructor(handle, talloc_reference_destructor); handle->ptr = discard_const_p(void, ptr); handle->location = location; _TLIST_ADD(tc->refs, handle); return handle->ptr; } static void *_talloc_steal_internal(const void *new_ctx, const void *ptr); static inline void _tc_free_poolmem(struct talloc_chunk *tc, const char *location) { struct talloc_pool_hdr *pool; struct talloc_chunk *pool_tc; void *next_tc; pool = tc->pool; pool_tc = talloc_chunk_from_pool(pool); next_tc = tc_next_chunk(tc); _talloc_chunk_set_free(tc, location); TC_INVALIDATE_FULL_CHUNK(tc); if (unlikely(pool->object_count == 0)) { talloc_abort("Pool object count zero!"); return; } pool->object_count--; if (unlikely(pool->object_count == 1 && !(pool_tc->flags & TALLOC_FLAG_FREE))) { /* * if there is just one object left in the pool * and pool->flags does not have TALLOC_FLAG_FREE, * it means this is the pool itself and * the rest is available for new objects * again. */ pool->end = tc_pool_first_chunk(pool); tc_invalidate_pool(pool); return; } if (unlikely(pool->object_count == 0)) { /* * we mark the freed memory with where we called the free * from. This means on a double free error we can report where * the first free came from */ pool_tc->name = location; if (pool_tc->flags & TALLOC_FLAG_POOLMEM) { _tc_free_poolmem(pool_tc, location); } else { /* * The tc_memlimit_update_on_free() * call takes into account the * prefix TP_HDR_SIZE allocated before * the pool talloc_chunk. 
*/ tc_memlimit_update_on_free(pool_tc); TC_INVALIDATE_FULL_CHUNK(pool_tc); free(pool); } return; } if (pool->end == next_tc) { /* * if pool->pool still points to end of * 'tc' (which is stored in the 'next_tc' variable), * we can reclaim the memory of 'tc'. */ pool->end = tc; return; } /* * Do nothing. The memory is just "wasted", waiting for the pool * itself to be freed. */ } static inline void _tc_free_children_internal(struct talloc_chunk *tc, void *ptr, const char *location); static inline int _talloc_free_internal(void *ptr, const char *location); /* internal free call that takes a struct talloc_chunk *. */ static inline int _tc_free_internal(struct talloc_chunk *tc, const char *location) { void *ptr_to_free; void *ptr = TC_PTR_FROM_CHUNK(tc); if (unlikely(tc->refs)) { int is_child; /* check if this is a reference from a child or * grandchild back to it's parent or grandparent * * in that case we need to remove the reference and * call another instance of talloc_free() on the current * pointer. */ is_child = talloc_is_parent(tc->refs, ptr); _talloc_free_internal(tc->refs, location); if (is_child) { return _talloc_free_internal(ptr, location); } return -1; } if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) { /* we have a free loop - stop looping */ return 0; } if (unlikely(tc->destructor)) { talloc_destructor_t d = tc->destructor; /* * Protect the destructor against some overwrite * attacks, by explicitly checking it has the right * magic here. */ if (talloc_chunk_from_ptr(ptr) != tc) { /* * This can't actually happen, the * call itself will panic. */ TALLOC_ABORT("talloc_chunk_from_ptr failed!"); } if (d == (talloc_destructor_t)-1) { return -1; } tc->destructor = (talloc_destructor_t)-1; if (d(ptr) == -1) { /* * Only replace the destructor pointer if * calling the destructor didn't modify it. */ if (tc->destructor == (talloc_destructor_t)-1) { tc->destructor = d; } return -1; } tc->destructor = NULL; } if (tc->parent) { _TLIST_REMOVE(tc->parent->child, tc); if (tc->parent->child) { tc->parent->child->parent = tc->parent; } } else { if (tc->prev) tc->prev->next = tc->next; if (tc->next) tc->next->prev = tc->prev; tc->prev = tc->next = NULL; } tc->flags |= TALLOC_FLAG_LOOP; _tc_free_children_internal(tc, ptr, location); _talloc_chunk_set_free(tc, location); if (tc->flags & TALLOC_FLAG_POOL) { struct talloc_pool_hdr *pool; pool = talloc_pool_from_chunk(tc); if (unlikely(pool->object_count == 0)) { talloc_abort("Pool object count zero!"); return 0; } pool->object_count--; if (likely(pool->object_count != 0)) { return 0; } /* * With object_count==0, a pool becomes a normal piece of * memory to free. If it's allocated inside a pool, it needs * to be freed as poolmem, else it needs to be just freed. 
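 *
 * Illustration of the two cases (a sketch; "ctx" is a placeholder context):
 *
 *     void *outer = talloc_pool(ctx, 8192);    // plain malloc()ed pool header:
 *                                              // released below with free()
 *     void *inner = talloc_pool(outer, 1024);  // carved out of "outer", so it is
 *                                              // marked POOLMEM and handed back
 *                                              // through _tc_free_poolmem()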
*/ ptr_to_free = pool; } else { ptr_to_free = tc; } if (tc->flags & TALLOC_FLAG_POOLMEM) { _tc_free_poolmem(tc, location); return 0; } tc_memlimit_update_on_free(tc); TC_INVALIDATE_FULL_CHUNK(tc); free(ptr_to_free); return 0; } /* internal talloc_free call */ static inline int _talloc_free_internal(void *ptr, const char *location) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return -1; } /* possibly initialised the talloc fill value */ if (unlikely(!talloc_fill.initialised)) { const char *fill = getenv(TALLOC_FILL_ENV); if (fill != NULL) { talloc_fill.enabled = true; talloc_fill.fill_value = strtoul(fill, NULL, 0); } talloc_fill.initialised = true; } tc = talloc_chunk_from_ptr(ptr); return _tc_free_internal(tc, location); } static inline size_t _talloc_total_limit_size(const void *ptr, struct talloc_memlimit *old_limit, struct talloc_memlimit *new_limit); /* move a lump of memory from one talloc context to another return the ptr on success, or NULL if it could not be transferred. passing NULL as ptr will always return NULL with no side effects. */ static void *_talloc_steal_internal(const void *new_ctx, const void *ptr) { struct talloc_chunk *tc, *new_tc; size_t ctx_size = 0; if (unlikely(!ptr)) { return NULL; } if (unlikely(new_ctx == NULL)) { new_ctx = null_context; } tc = talloc_chunk_from_ptr(ptr); if (tc->limit != NULL) { ctx_size = _talloc_total_limit_size(ptr, NULL, NULL); /* Decrement the memory limit from the source .. */ talloc_memlimit_shrink(tc->limit->upper, ctx_size); if (tc->limit->parent == tc) { tc->limit->upper = NULL; } else { tc->limit = NULL; } } if (unlikely(new_ctx == NULL)) { if (tc->parent) { _TLIST_REMOVE(tc->parent->child, tc); if (tc->parent->child) { tc->parent->child->parent = tc->parent; } } else { if (tc->prev) tc->prev->next = tc->next; if (tc->next) tc->next->prev = tc->prev; } tc->parent = tc->next = tc->prev = NULL; return discard_const_p(void, ptr); } new_tc = talloc_chunk_from_ptr(new_ctx); if (unlikely(tc == new_tc || tc->parent == new_tc)) { return discard_const_p(void, ptr); } if (tc->parent) { _TLIST_REMOVE(tc->parent->child, tc); if (tc->parent->child) { tc->parent->child->parent = tc->parent; } } else { if (tc->prev) tc->prev->next = tc->next; if (tc->next) tc->next->prev = tc->prev; tc->prev = tc->next = NULL; } tc->parent = new_tc; if (new_tc->child) new_tc->child->parent = NULL; _TLIST_ADD(new_tc->child, tc); if (tc->limit || new_tc->limit) { ctx_size = _talloc_total_limit_size(ptr, tc->limit, new_tc->limit); /* .. and increment it in the destination. */ if (new_tc->limit) { talloc_memlimit_grow(new_tc->limit, ctx_size); } } return discard_const_p(void, ptr); } /* move a lump of memory from one talloc context to another return the ptr on success, or NULL if it could not be transferred. passing NULL as ptr will always return NULL with no side effects. 
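  A usage sketch of the public talloc_steal() wrapper (tmp_ctx and
  long_lived_ctx are placeholder contexts):

      char *s = talloc_strdup(tmp_ctx, "keep me");

      talloc_steal(long_lived_ctx, s);   // reparent s before tmp_ctx goes away
      talloc_free(tmp_ctx);              // s survives under long_lived_ctx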
*/ _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return NULL; } tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) { struct talloc_reference_handle *h; talloc_log("WARNING: talloc_steal with references at %s\n", location); for (h=tc->refs; h; h=h->next) { talloc_log("\treference at %s\n", h->location); } } #if 0 /* this test is probably too expensive to have on in the normal build, but it useful for debugging */ if (talloc_is_parent(new_ctx, ptr)) { talloc_log("WARNING: stealing into talloc child at %s\n", location); } #endif return _talloc_steal_internal(new_ctx, ptr); } /* this is like a talloc_steal(), but you must supply the old parent. This resolves the ambiguity in a talloc_steal() which is called on a context that has more than one parent (via references) The old parent can be either a reference or a parent */ _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr) { struct talloc_chunk *tc; struct talloc_reference_handle *h; if (unlikely(ptr == NULL)) { return NULL; } if (old_parent == talloc_parent(ptr)) { return _talloc_steal_internal(new_parent, ptr); } tc = talloc_chunk_from_ptr(ptr); for (h=tc->refs;h;h=h->next) { if (talloc_parent(h) == old_parent) { if (_talloc_steal_internal(new_parent, h) != h) { return NULL; } return discard_const_p(void, ptr); } } /* it wasn't a parent */ return NULL; } /* remove a secondary reference to a pointer. This undo's what talloc_reference() has done. The context and pointer arguments must match those given to a talloc_reference() */ static inline int talloc_unreference(const void *context, const void *ptr) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); struct talloc_reference_handle *h; if (unlikely(context == NULL)) { context = null_context; } for (h=tc->refs;h;h=h->next) { struct talloc_chunk *p = talloc_parent_chunk(h); if (p == NULL) { if (context == NULL) break; } else if (TC_PTR_FROM_CHUNK(p) == context) { break; } } if (h == NULL) { return -1; } return _talloc_free_internal(h, __location__); } /* remove a specific parent context from a pointer. 
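  A sketch of how the per-parent removal behaves (ctx1 and ctx2 are
  placeholder contexts):

      int *p = talloc(ctx1, int);
      talloc_reference(ctx2, p);    // p now has two parents

      talloc_unlink(ctx1, p);       // only the ctx1 link goes away, p stays alive
      talloc_unlink(ctx2, p);       // last owner removed, p is freed here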
This is a more controlled variant of talloc_free() */ /* coverity[ -tainted_data_sink : arg-1 ] */ _PUBLIC_ int talloc_unlink(const void *context, void *ptr) { struct talloc_chunk *tc_p, *new_p, *tc_c; void *new_parent; if (ptr == NULL) { return -1; } if (context == NULL) { context = null_context; } if (talloc_unreference(context, ptr) == 0) { return 0; } if (context != NULL) { tc_c = talloc_chunk_from_ptr(context); } else { tc_c = NULL; } if (tc_c != talloc_parent_chunk(ptr)) { return -1; } tc_p = talloc_chunk_from_ptr(ptr); if (tc_p->refs == NULL) { return _talloc_free_internal(ptr, __location__); } new_p = talloc_parent_chunk(tc_p->refs); if (new_p) { new_parent = TC_PTR_FROM_CHUNK(new_p); } else { new_parent = NULL; } if (talloc_unreference(new_parent, ptr) != 0) { return -1; } _talloc_steal_internal(new_parent, ptr); return 0; } /* add a name to an existing pointer - va_list version */ static inline const char *tc_set_name_v(struct talloc_chunk *tc, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); static inline const char *tc_set_name_v(struct talloc_chunk *tc, const char *fmt, va_list ap) { struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc), fmt, ap); if (likely(name_tc)) { tc->name = TC_PTR_FROM_CHUNK(name_tc); _tc_set_name_const(name_tc, ".name"); } else { tc->name = NULL; } return tc->name; } /* add a name to an existing pointer */ _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); const char *name; va_list ap; va_start(ap, fmt); name = tc_set_name_v(tc, fmt, ap); va_end(ap); return name; } /* create a named talloc pointer. Any talloc pointer can be named, and talloc_named() operates just like talloc() except that it allows you to name the pointer. */ _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...) { va_list ap; void *ptr; const char *name; struct talloc_chunk *tc; ptr = __talloc(context, size, &tc); if (unlikely(ptr == NULL)) return NULL; va_start(ap, fmt); name = tc_set_name_v(tc, fmt, ap); va_end(ap); if (unlikely(name == NULL)) { _talloc_free_internal(ptr, __location__); return NULL; } return ptr; } /* return the name of a talloc ptr, or "UNNAMED" */ static inline const char *__talloc_get_name(const void *ptr) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) { return ".reference"; } if (likely(tc->name)) { return tc->name; } return "UNNAMED"; } _PUBLIC_ const char *talloc_get_name(const void *ptr) { return __talloc_get_name(ptr); } /* check if a pointer has the given name. 
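  This is the routine behind the talloc_get_type() macro.  A sketch, with
  mem_ctx as a placeholder context:

      struct foo *f = talloc(mem_ctx, struct foo);   // chunk is named "struct foo"
      void *opaque = f;

      struct foo *ok  = talloc_get_type(opaque, struct foo);  // name matches
      struct bar *bad = talloc_get_type(opaque, struct bar);  // NULL, wrong name

  talloc_check_name() itself just checks whether the chunk carries the given
  name.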
If it does, return the pointer, otherwise return NULL */ _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name) { const char *pname; if (unlikely(ptr == NULL)) return NULL; pname = __talloc_get_name(ptr); if (likely(pname == name || strcmp(pname, name) == 0)) { return discard_const_p(void, ptr); } return NULL; } static void talloc_abort_type_mismatch(const char *location, const char *name, const char *expected) { const char *reason; reason = talloc_asprintf(NULL, "%s: Type mismatch: name[%s] expected[%s]", location, name?name:"NULL", expected); if (!reason) { reason = "Type mismatch"; } talloc_abort(reason); } _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location) { const char *pname; if (unlikely(ptr == NULL)) { talloc_abort_type_mismatch(location, NULL, name); return NULL; } pname = __talloc_get_name(ptr); if (likely(pname == name || strcmp(pname, name) == 0)) { return discard_const_p(void, ptr); } talloc_abort_type_mismatch(location, pname, name); return NULL; } /* this is for compatibility with older versions of talloc */ _PUBLIC_ void *talloc_init(const char *fmt, ...) { va_list ap; void *ptr; const char *name; struct talloc_chunk *tc; ptr = __talloc(NULL, 0, &tc); if (unlikely(ptr == NULL)) return NULL; va_start(ap, fmt); name = tc_set_name_v(tc, fmt, ap); va_end(ap); if (unlikely(name == NULL)) { _talloc_free_internal(ptr, __location__); return NULL; } return ptr; } static inline void _tc_free_children_internal(struct talloc_chunk *tc, void *ptr, const char *location) { while (tc->child) { /* we need to work out who will own an abandoned child if it cannot be freed. In priority order, the first choice is owner of any remaining reference to this pointer, the second choice is our parent, and the final choice is the null context. */ void *child = TC_PTR_FROM_CHUNK(tc->child); const void *new_parent = null_context; if (unlikely(tc->child->refs)) { struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs); if (p) new_parent = TC_PTR_FROM_CHUNK(p); } if (unlikely(_tc_free_internal(tc->child, location) == -1)) { if (talloc_parent_chunk(child) != tc) { /* * Destructor already reparented this child. * No further reparenting needed. */ continue; } if (new_parent == null_context) { struct talloc_chunk *p = talloc_parent_chunk(ptr); if (p) new_parent = TC_PTR_FROM_CHUNK(p); } _talloc_steal_internal(new_parent, child); } } } /* this is a replacement for the Samba3 talloc_destroy_pool functionality. It should probably not be used in new code. It's in here to keep the talloc code consistent across Samba 3 and 4. */ _PUBLIC_ void talloc_free_children(void *ptr) { struct talloc_chunk *tc_name = NULL; struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return; } tc = talloc_chunk_from_ptr(ptr); /* we do not want to free the context name if it is a child .. */ if (likely(tc->child)) { for (tc_name = tc->child; tc_name; tc_name = tc_name->next) { if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break; } if (tc_name) { _TLIST_REMOVE(tc->child, tc_name); if (tc->child) { tc->child->parent = tc; } } } _tc_free_children_internal(tc, ptr, __location__); /* .. 
so we put it back after all other children have been freed */ if (tc_name) { if (tc->child) { tc->child->parent = NULL; } tc_name->parent = tc; _TLIST_ADD(tc->child, tc_name); } } /* Allocate a bit of memory as a child of an existing pointer */ _PUBLIC_ void *_talloc(const void *context, size_t size) { struct talloc_chunk *tc; return __talloc(context, size, &tc); } /* externally callable talloc_set_name_const() */ _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name) { _tc_set_name_const(talloc_chunk_from_ptr(ptr), name); } /* create a named talloc pointer. Any talloc pointer can be named, and talloc_named() operates just like talloc() except that it allows you to name the pointer. */ _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name) { return _talloc_named_const(context, size, name); } /* free a talloc pointer. This also frees all child pointers of this pointer recursively return 0 if the memory is actually freed, otherwise -1. The memory will not be freed if the ref_count is > 1 or the destructor (if any) returns non-zero */ _PUBLIC_ int _talloc_free(void *ptr, const char *location) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return -1; } tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->refs != NULL)) { struct talloc_reference_handle *h; if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) { /* in this case we do know which parent should get this pointer, as there is really only one parent */ return talloc_unlink(null_context, ptr); } talloc_log("ERROR: talloc_free with references at %s\n", location); for (h=tc->refs; h; h=h->next) { talloc_log("\treference at %s\n", h->location); } return -1; } return _talloc_free_internal(ptr, location); } /* A talloc version of realloc. The context argument is only used if ptr is NULL */ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name) { struct talloc_chunk *tc; void *new_ptr; bool malloced = false; struct talloc_pool_hdr *pool_hdr = NULL; size_t old_size = 0; size_t new_size = 0; /* size zero is equivalent to free() */ if (unlikely(size == 0)) { talloc_unlink(context, ptr); return NULL; } if (unlikely(size >= MAX_TALLOC_SIZE)) { return NULL; } /* realloc(NULL) is equivalent to malloc() */ if (ptr == NULL) { return _talloc_named_const(context, size, name); } tc = talloc_chunk_from_ptr(ptr); /* don't allow realloc on referenced pointers */ if (unlikely(tc->refs)) { return NULL; } /* don't let anybody try to realloc a talloc_pool */ if (unlikely(tc->flags & TALLOC_FLAG_POOL)) { return NULL; } /* handle realloc inside a talloc_pool */ if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) { pool_hdr = tc->pool; } /* don't shrink if we have less than 1k to gain */ if (size < tc->size && tc->limit == NULL) { if (pool_hdr) { void *next_tc = tc_next_chunk(tc); TC_INVALIDATE_SHRINK_CHUNK(tc, size); tc->size = size; if (next_tc == pool_hdr->end) { /* note: tc->size has changed, so this works */ pool_hdr->end = tc_next_chunk(tc); } return ptr; } else if ((tc->size - size) < 1024) { /* * if we call TC_INVALIDATE_SHRINK_CHUNK() here * we would need to call TC_UNDEFINE_GROW_CHUNK() * after each realloc call, which slows down * testing a lot :-(. * * That is why we only mark memory as undefined here. */ TC_UNDEFINE_SHRINK_CHUNK(tc, size); /* do not shrink if we have less than 1k to gain */ tc->size = size; return ptr; } } else if (tc->size == size) { /* * do not change the pointer if it is exactly * the same size. 
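 *
 * From the caller's side the behaviour handled in this function looks
 * roughly like this (mem_ctx is a placeholder; sizes are illustrative):
 *
 *     int *v = talloc_array(mem_ctx, int, 10);
 *     v = talloc_realloc(mem_ctx, v, int, 100);  // may move; children move with it
 *     v = talloc_realloc(mem_ctx, v, int, 100);  // same size: pointer is unchanged
 *     v = talloc_realloc(mem_ctx, v, int, 0);    // size 0 acts like a free, v == NULL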
*/ return ptr; } /* * by resetting magic we catch users of the old memory * * We mark this memory as free, and also over-stamp the talloc * magic with the old-style magic. * * Why? This tries to avoid a memory read use-after-free from * disclosing our talloc magic, which would then allow an * attacker to prepare a valid header and so run a destructor. * * What else? We have to re-stamp back a valid normal magic * on this memory once realloc() is done, as it will have done * a memcpy() into the new valid memory. We can't do this in * reverse as that would be a real use-after-free. */ _talloc_chunk_set_free(tc, NULL); if (pool_hdr) { struct talloc_chunk *pool_tc; void *next_tc = tc_next_chunk(tc); size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size); size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size); size_t space_needed; size_t space_left; unsigned int chunk_count = pool_hdr->object_count; pool_tc = talloc_chunk_from_pool(pool_hdr); if (!(pool_tc->flags & TALLOC_FLAG_FREE)) { chunk_count -= 1; } if (chunk_count == 1) { /* * optimize for the case where 'tc' is the only * chunk in the pool. */ char *start = tc_pool_first_chunk(pool_hdr); space_needed = new_chunk_size; space_left = (char *)tc_pool_end(pool_hdr) - start; if (space_left >= space_needed) { size_t old_used = TC_HDR_SIZE + tc->size; size_t new_used = TC_HDR_SIZE + size; new_ptr = start; #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) { /* * The area from * start -> tc may have * been freed and thus been marked as * VALGRIND_MEM_NOACCESS. Set it to * VALGRIND_MEM_UNDEFINED so we can * copy into it without valgrind errors. * We can't just mark * new_ptr -> new_ptr + old_used * as this may overlap on top of tc, * (which is why we use memmove, not * memcpy below) hence the MIN. */ size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used); VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len); } #endif memmove(new_ptr, tc, old_used); tc = (struct talloc_chunk *)new_ptr; TC_UNDEFINE_GROW_CHUNK(tc, size); /* * first we do not align the pool pointer * because we want to invalidate the padding * too. */ pool_hdr->end = new_used + (char *)new_ptr; tc_invalidate_pool(pool_hdr); /* now the aligned pointer */ pool_hdr->end = new_chunk_size + (char *)new_ptr; goto got_new_ptr; } next_tc = NULL; } if (new_chunk_size == old_chunk_size) { TC_UNDEFINE_GROW_CHUNK(tc, size); _talloc_chunk_set_not_free(tc); tc->size = size; return ptr; } if (next_tc == pool_hdr->end) { /* * optimize for the case where 'tc' is the last * chunk in the pool. */ space_needed = new_chunk_size - old_chunk_size; space_left = tc_pool_space_left(pool_hdr); if (space_left >= space_needed) { TC_UNDEFINE_GROW_CHUNK(tc, size); _talloc_chunk_set_not_free(tc); tc->size = size; pool_hdr->end = tc_next_chunk(tc); return ptr; } } new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0); if (new_ptr == NULL) { /* * Couldn't allocate from pool (pool size * counts as already allocated for memlimit * purposes). We must check memory limit * before any real malloc. */ if (tc->limit) { /* * Note we're doing an extra malloc, * on top of the pool size, so account * for size only, not the difference * between old and new size. 
*/ if (!talloc_memlimit_check(tc->limit, size)) { _talloc_chunk_set_not_free(tc); errno = ENOMEM; return NULL; } } new_ptr = malloc(TC_HDR_SIZE+size); malloced = true; new_size = size; } if (new_ptr) { memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE); _tc_free_poolmem(tc, __location__ "_talloc_realloc"); } } else { /* We're doing realloc here, so record the difference. */ old_size = tc->size; new_size = size; /* * We must check memory limit * before any real realloc. */ if (tc->limit && (size > old_size)) { if (!talloc_memlimit_check(tc->limit, (size - old_size))) { _talloc_chunk_set_not_free(tc); errno = ENOMEM; return NULL; } } new_ptr = realloc(tc, size + TC_HDR_SIZE); } got_new_ptr: if (unlikely(!new_ptr)) { /* * Ok, this is a strange spot. We have to put back * the old talloc_magic and any flags, except the * TALLOC_FLAG_FREE as this was not free'ed by the * realloc() call after all */ _talloc_chunk_set_not_free(tc); return NULL; } /* * tc is now the new value from realloc(), the old memory we * can't access any more and was preemptively marked as * TALLOC_FLAG_FREE before the call. Now we mark it as not * free again */ tc = (struct talloc_chunk *)new_ptr; _talloc_chunk_set_not_free(tc); if (malloced) { tc->flags &= ~TALLOC_FLAG_POOLMEM; } if (tc->parent) { tc->parent->child = tc; } if (tc->child) { tc->child->parent = tc; } if (tc->prev) { tc->prev->next = tc; } if (tc->next) { tc->next->prev = tc; } if (new_size > old_size) { talloc_memlimit_grow(tc->limit, new_size - old_size); } else if (new_size < old_size) { talloc_memlimit_shrink(tc->limit, old_size - new_size); } tc->size = size; _tc_set_name_const(tc, name); return TC_PTR_FROM_CHUNK(tc); } /* a wrapper around talloc_steal() for situations where you are moving a pointer between two structures, and want the old pointer to be set to NULL */ _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr) { const void **pptr = discard_const_p(const void *,_pptr); void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr)); (*pptr) = NULL; return ret; } enum talloc_mem_count_type { TOTAL_MEM_SIZE, TOTAL_MEM_BLOCKS, TOTAL_MEM_LIMIT, }; static inline size_t _talloc_total_mem_internal(const void *ptr, enum talloc_mem_count_type type, struct talloc_memlimit *old_limit, struct talloc_memlimit *new_limit) { size_t total = 0; struct talloc_chunk *c, *tc; if (ptr == NULL) { ptr = null_context; } if (ptr == NULL) { return 0; } tc = talloc_chunk_from_ptr(ptr); if (old_limit || new_limit) { if (tc->limit && tc->limit->upper == old_limit) { tc->limit->upper = new_limit; } } /* optimize in the memlimits case */ if (type == TOTAL_MEM_LIMIT && tc->limit != NULL && tc->limit != old_limit && tc->limit->parent == tc) { return tc->limit->cur_size; } if (tc->flags & TALLOC_FLAG_LOOP) { return 0; } tc->flags |= TALLOC_FLAG_LOOP; if (old_limit || new_limit) { if (old_limit == tc->limit) { tc->limit = new_limit; } } switch (type) { case TOTAL_MEM_SIZE: if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) { total = tc->size; } break; case TOTAL_MEM_BLOCKS: total++; break; case TOTAL_MEM_LIMIT: if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) { /* * Don't count memory allocated from a pool * when calculating limits. Only count the * pool itself. */ if (!(tc->flags & TALLOC_FLAG_POOLMEM)) { if (tc->flags & TALLOC_FLAG_POOL) { /* * If this is a pool, the allocated * size is in the pool header, and * remember to add in the prefix * length. 
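 *
 * Sketch of the caller-visible effect (mem_ctx is a placeholder and the
 * sizes are only illustrative):
 *
 *     talloc_set_memlimit(mem_ctx, 1024 * 1024);     // 1 MiB budget
 *     void *pool = talloc_pool(mem_ctx, 64 * 1024);  // charged once, as a whole
 *     void *obj = talloc_size(pool, 128);            // served from the pool:
 *                                                    // no additional charge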
*/ struct talloc_pool_hdr *pool_hdr = talloc_pool_from_chunk(tc); total = pool_hdr->poolsize + TC_HDR_SIZE + TP_HDR_SIZE; } else { total = tc->size + TC_HDR_SIZE; } } } break; } for (c = tc->child; c; c = c->next) { total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type, old_limit, new_limit); } tc->flags &= ~TALLOC_FLAG_LOOP; return total; } /* return the total size of a talloc pool (subtree) */ _PUBLIC_ size_t talloc_total_size(const void *ptr) { return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL); } /* return the total number of blocks in a talloc pool (subtree) */ _PUBLIC_ size_t talloc_total_blocks(const void *ptr) { return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL); } /* return the number of external references to a pointer */ _PUBLIC_ size_t talloc_reference_count(const void *ptr) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); struct talloc_reference_handle *h; size_t ret = 0; for (h=tc->refs;h;h=h->next) { ret++; } return ret; } /* report on memory usage by all children of a pointer, giving a full tree view */ _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth, void (*callback)(const void *ptr, int depth, int max_depth, int is_ref, void *private_data), void *private_data) { struct talloc_chunk *c, *tc; if (ptr == NULL) { ptr = null_context; } if (ptr == NULL) return; tc = talloc_chunk_from_ptr(ptr); if (tc->flags & TALLOC_FLAG_LOOP) { return; } callback(ptr, depth, max_depth, 0, private_data); if (max_depth >= 0 && depth >= max_depth) { return; } tc->flags |= TALLOC_FLAG_LOOP; for (c=tc->child;c;c=c->next) { if (c->name == TALLOC_MAGIC_REFERENCE) { struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c); callback(h->ptr, depth + 1, max_depth, 1, private_data); } else { talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data); } } tc->flags &= ~TALLOC_FLAG_LOOP; } static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f) { const char *name = __talloc_get_name(ptr); struct talloc_chunk *tc; FILE *f = (FILE *)_f; if (is_ref) { fprintf(f, "%*sreference to: %s\n", depth*4, "", name); return; } tc = talloc_chunk_from_ptr(ptr); if (tc->limit && tc->limit->parent == tc) { fprintf(f, "%*s%-30s is a memlimit context" " (max_size = %lu bytes, cur_size = %lu bytes)\n", depth*4, "", name, (unsigned long)tc->limit->max_size, (unsigned long)tc->limit->cur_size); } if (depth == 0) { fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n", (max_depth < 0 ? 
"full " :""), name, (unsigned long)talloc_total_size(ptr), (unsigned long)talloc_total_blocks(ptr)); return; } fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n", depth*4, "", name, (unsigned long)talloc_total_size(ptr), (unsigned long)talloc_total_blocks(ptr), (int)talloc_reference_count(ptr), ptr); #if 0 fprintf(f, "content: "); if (talloc_total_size(ptr)) { int tot = talloc_total_size(ptr); int i; for (i = 0; i < tot; i++) { if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) { fprintf(f, "%c", ((char *)ptr)[i]); } else { fprintf(f, "~%02x", ((char *)ptr)[i]); } } } fprintf(f, "\n"); #endif } /* report on memory usage by all children of a pointer, giving a full tree view */ _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f) { if (f) { talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f); fflush(f); } } /* report on memory usage by all children of a pointer, giving a full tree view */ _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f) { talloc_report_depth_file(ptr, 0, -1, f); } /* report on memory usage by all children of a pointer */ _PUBLIC_ void talloc_report(const void *ptr, FILE *f) { talloc_report_depth_file(ptr, 0, 1, f); } /* enable tracking of the NULL context */ _PUBLIC_ void talloc_enable_null_tracking(void) { if (null_context == NULL) { null_context = _talloc_named_const(NULL, 0, "null_context"); if (autofree_context != NULL) { talloc_reparent(NULL, null_context, autofree_context); } } } /* enable tracking of the NULL context, not moving the autofree context into the NULL context. This is needed for the talloc testsuite */ _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void) { if (null_context == NULL) { null_context = _talloc_named_const(NULL, 0, "null_context"); } } /* disable tracking of the NULL context */ _PUBLIC_ void talloc_disable_null_tracking(void) { if (null_context != NULL) { /* we have to move any children onto the real NULL context */ struct talloc_chunk *tc, *tc2; tc = talloc_chunk_from_ptr(null_context); for (tc2 = tc->child; tc2; tc2=tc2->next) { if (tc2->parent == tc) tc2->parent = NULL; if (tc2->prev == tc) tc2->prev = NULL; } for (tc2 = tc->next; tc2; tc2=tc2->next) { if (tc2->parent == tc) tc2->parent = NULL; if (tc2->prev == tc) tc2->prev = NULL; } tc->child = NULL; tc->next = NULL; } talloc_free(null_context); null_context = NULL; } /* enable leak reporting on exit */ _PUBLIC_ void talloc_enable_leak_report(void) { talloc_enable_null_tracking(); talloc_report_null = true; talloc_setup_atexit(); } /* enable full leak reporting on exit */ _PUBLIC_ void talloc_enable_leak_report_full(void) { talloc_enable_null_tracking(); talloc_report_null_full = true; talloc_setup_atexit(); } /* talloc and zero memory. */ _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name) { void *p = _talloc_named_const(ctx, size, name); if (p) { memset(p, '\0', size); } return p; } /* memdup with a talloc. 
*/ _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name) { void *newp = NULL; if (likely(size > 0) && unlikely(p == NULL)) { return NULL; } newp = _talloc_named_const(t, size, name); if (likely(newp != NULL) && likely(size > 0)) { memcpy(newp, p, size); } return newp; } static inline char *__talloc_strlendup(const void *t, const char *p, size_t len) { char *ret; struct talloc_chunk *tc; ret = (char *)__talloc(t, len + 1, &tc); if (unlikely(!ret)) return NULL; memcpy(ret, p, len); ret[len] = 0; _tc_set_name_const(tc, ret); return ret; } /* strdup with a talloc */ _PUBLIC_ char *talloc_strdup(const void *t, const char *p) { if (unlikely(!p)) return NULL; return __talloc_strlendup(t, p, strlen(p)); } /* strndup with a talloc */ _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n) { if (unlikely(!p)) return NULL; return __talloc_strlendup(t, p, strnlen(p, n)); } static inline char *__talloc_strlendup_append(char *s, size_t slen, const char *a, size_t alen) { char *ret; ret = talloc_realloc(NULL, s, char, slen + alen + 1); if (unlikely(!ret)) return NULL; /* append the string and the trailing \0 */ memcpy(&ret[slen], a, alen); ret[slen+alen] = 0; _tc_set_name_const(talloc_chunk_from_ptr(ret), ret); return ret; } /* * Appends at the end of the string. */ _PUBLIC_ char *talloc_strdup_append(char *s, const char *a) { if (unlikely(!s)) { return talloc_strdup(NULL, a); } if (unlikely(!a)) { return s; } return __talloc_strlendup_append(s, strlen(s), a, strlen(a)); } /* * Appends at the end of the talloc'ed buffer, * not the end of the string. */ _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a) { size_t slen; if (unlikely(!s)) { return talloc_strdup(NULL, a); } if (unlikely(!a)) { return s; } slen = talloc_get_size(s); if (likely(slen > 0)) { slen--; } return __talloc_strlendup_append(s, slen, a, strlen(a)); } /* * Appends at the end of the string. */ _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n) { if (unlikely(!s)) { return talloc_strndup(NULL, a, n); } if (unlikely(!a)) { return s; } return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n)); } /* * Appends at the end of the talloc'ed buffer, * not the end of the string. 
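 *
 * A sketch of the difference from the plain _append variant, assuming the
 * string was built only with the talloc string helpers (so the buffer is
 * always strlen + 1 bytes):
 *
 *     char *s = talloc_strdup(NULL, "ab");            // buffer size 3
 *     s = talloc_strndup_append_buffer(s, "cdef", 2); // "abcd": appends at
 *                                                     // offset size - 1, without
 *                                                     // walking the string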
*/ _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n) { size_t slen; if (unlikely(!s)) { return talloc_strndup(NULL, a, n); } if (unlikely(!a)) { return s; } slen = talloc_get_size(s); if (likely(slen > 0)) { slen--; } return __talloc_strlendup_append(s, slen, a, strnlen(a, n)); } #ifndef HAVE_VA_COPY #ifdef HAVE___VA_COPY #define va_copy(dest, src) __va_copy(dest, src) #else #define va_copy(dest, src) (dest) = (src) #endif #endif static struct talloc_chunk *_vasprintf_tc(const void *t, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); static struct talloc_chunk *_vasprintf_tc(const void *t, const char *fmt, va_list ap) { int vlen; size_t len; char *ret; va_list ap2; struct talloc_chunk *tc; char buf[1024]; /* this call looks strange, but it makes it work on older solaris boxes */ va_copy(ap2, ap); vlen = vsnprintf(buf, sizeof(buf), fmt, ap2); va_end(ap2); if (unlikely(vlen < 0)) { return NULL; } len = vlen; if (unlikely(len + 1 < len)) { return NULL; } ret = (char *)__talloc(t, len+1, &tc); if (unlikely(!ret)) return NULL; if (len < sizeof(buf)) { memcpy(ret, buf, len+1); } else { va_copy(ap2, ap); vsnprintf(ret, len+1, fmt, ap2); va_end(ap2); } _tc_set_name_const(tc, ret); return tc; } _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) { struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap); if (tc == NULL) { return NULL; } return TC_PTR_FROM_CHUNK(tc); } /* Perform string formatting, and return a pointer to newly allocated memory holding the result, inside a memory pool. */ _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...) { va_list ap; char *ret; va_start(ap, fmt); ret = talloc_vasprintf(t, fmt, ap); va_end(ap); return ret; } static inline char *__talloc_vaslenprintf_append(char *s, size_t slen, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0); static inline char *__talloc_vaslenprintf_append(char *s, size_t slen, const char *fmt, va_list ap) { ssize_t alen; va_list ap2; char c; va_copy(ap2, ap); alen = vsnprintf(&c, 1, fmt, ap2); va_end(ap2); if (alen <= 0) { /* Either the vsnprintf failed or the format resulted in * no characters being formatted. In the former case, we * ought to return NULL, in the latter we ought to return * the original string. Most current callers of this * function expect it to never return NULL. */ return s; } s = talloc_realloc(NULL, s, char, slen + alen + 1); if (!s) return NULL; va_copy(ap2, ap); vsnprintf(s + slen, alen + 1, fmt, ap2); va_end(ap2); _tc_set_name_const(talloc_chunk_from_ptr(s), s); return s; } /** * Realloc @p s to append the formatted result of @p fmt and @p ap, * and return @p s, which may have moved. Good for gradually * accumulating output into a string buffer. Appends at the end * of the string. **/ _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) { if (unlikely(!s)) { return talloc_vasprintf(NULL, fmt, ap); } return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap); } /** * Realloc @p s to append the formatted result of @p fmt and @p ap, * and return @p s, which may have moved. Always appends at the * end of the talloc'ed buffer, not the end of the string. **/ _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) { size_t slen; if (unlikely(!s)) { return talloc_vasprintf(NULL, fmt, ap); } slen = talloc_get_size(s); if (likely(slen > 0)) { slen--; } return __talloc_vaslenprintf_append(s, slen, fmt, ap); } /* Realloc @p s to append the formatted result of @p fmt and return @p s, which may have moved. 
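  A sketch of the accumulation pattern (mem_ctx and n are placeholders):

      char *out = talloc_strdup(mem_ctx, "");
      for (size_t i = 0; i < n; i++) {
          out = talloc_asprintf_append(out, "item %zu\n", i);
          if (out == NULL) {
              break;        // allocation failure
          }
      }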
Good for gradually accumulating output into a string buffer. */ _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...) { va_list ap; va_start(ap, fmt); s = talloc_vasprintf_append(s, fmt, ap); va_end(ap); return s; } /* Realloc @p s to append the formatted result of @p fmt and return @p s, which may have moved. Good for gradually accumulating output into a buffer. */ _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) { va_list ap; va_start(ap, fmt); s = talloc_vasprintf_append_buffer(s, fmt, ap); va_end(ap); return s; } /* alloc an array, checking for integer overflow in the array size */ _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name) { if (count >= MAX_TALLOC_SIZE/el_size) { return NULL; } return _talloc_named_const(ctx, el_size * count, name); } /* alloc an zero array, checking for integer overflow in the array size */ _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name) { if (count >= MAX_TALLOC_SIZE/el_size) { return NULL; } return _talloc_zero(ctx, el_size * count, name); } /* realloc an array, checking for integer overflow in the array size */ _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name) { if (count >= MAX_TALLOC_SIZE/el_size) { return NULL; } return _talloc_realloc(ctx, ptr, el_size * count, name); } /* a function version of talloc_realloc(), so it can be passed as a function pointer to libraries that want a realloc function (a realloc function encapsulates all the basic capabilities of an allocation library, which is why this is useful) */ _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size) { return _talloc_realloc(context, ptr, size, NULL); } static int talloc_autofree_destructor(void *ptr) { autofree_context = NULL; return 0; } /* return a context which will be auto-freed on exit this is useful for reducing the noise in leak reports */ _PUBLIC_ void *talloc_autofree_context(void) { if (autofree_context == NULL) { autofree_context = _talloc_named_const(NULL, 0, "autofree_context"); talloc_set_destructor(autofree_context, talloc_autofree_destructor); talloc_setup_atexit(); } return autofree_context; } _PUBLIC_ size_t talloc_get_size(const void *context) { struct talloc_chunk *tc; if (context == NULL) { return 0; } tc = talloc_chunk_from_ptr(context); return tc->size; } /* find a parent of this context that has the given name, if any */ _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name) { struct talloc_chunk *tc; if (context == NULL) { return NULL; } tc = talloc_chunk_from_ptr(context); while (tc) { if (tc->name && strcmp(tc->name, name) == 0) { return TC_PTR_FROM_CHUNK(tc); } while (tc && tc->prev) tc = tc->prev; if (tc) { tc = tc->parent; } } return NULL; } /* show the parentage of a context */ _PUBLIC_ void talloc_show_parents(const void *context, FILE *file) { struct talloc_chunk *tc; if (context == NULL) { fprintf(file, "talloc no parents for NULL\n"); return; } tc = talloc_chunk_from_ptr(context); fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context)); while (tc) { fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc))); while (tc && tc->prev) tc = tc->prev; if (tc) { tc = tc->parent; } } fflush(file); } /* return 1 if ptr is a parent of context */ static int _talloc_is_parent(const void *context, const void *ptr, int depth) { struct talloc_chunk *tc; if (context == NULL) { return 0; } tc = 
talloc_chunk_from_ptr(context); while (tc) { if (depth <= 0) { return 0; } if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1; while (tc && tc->prev) tc = tc->prev; if (tc) { tc = tc->parent; depth--; } } return 0; } /* return 1 if ptr is a parent of context */ _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr) { return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH); } /* return the total size of memory used by this context and all children */ static inline size_t _talloc_total_limit_size(const void *ptr, struct talloc_memlimit *old_limit, struct talloc_memlimit *new_limit) { return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT, old_limit, new_limit); } static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size) { struct talloc_memlimit *l; for (l = limit; l != NULL; l = l->upper) { if (l->max_size != 0 && ((l->max_size <= l->cur_size) || (l->max_size - l->cur_size < size))) { return false; } } return true; } /* Update memory limits when freeing a talloc_chunk. */ static void tc_memlimit_update_on_free(struct talloc_chunk *tc) { size_t limit_shrink_size; if (!tc->limit) { return; } /* * Pool entries don't count. Only the pools * themselves are counted as part of the memory * limits. Note that this also takes care of * nested pools which have both flags * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set. */ if (tc->flags & TALLOC_FLAG_POOLMEM) { return; } /* * If we are part of a memory limited context hierarchy * we need to subtract the memory used from the counters */ limit_shrink_size = tc->size+TC_HDR_SIZE; /* * If we're deallocating a pool, take into * account the prefix size added for the pool. */ if (tc->flags & TALLOC_FLAG_POOL) { limit_shrink_size += TP_HDR_SIZE; } talloc_memlimit_shrink(tc->limit, limit_shrink_size); if (tc->limit->parent == tc) { free(tc->limit); } tc->limit = NULL; } /* Increase memory limit accounting after a malloc/realloc. */ static void talloc_memlimit_grow(struct talloc_memlimit *limit, size_t size) { struct talloc_memlimit *l; for (l = limit; l != NULL; l = l->upper) { size_t new_cur_size = l->cur_size + size; if (new_cur_size < l->cur_size) { talloc_abort("logic error in talloc_memlimit_grow\n"); return; } l->cur_size = new_cur_size; } } /* Decrease memory limit accounting after a free/realloc. */ static void talloc_memlimit_shrink(struct talloc_memlimit *limit, size_t size) { struct talloc_memlimit *l; for (l = limit; l != NULL; l = l->upper) { if (l->cur_size < size) { talloc_abort("logic error in talloc_memlimit_shrink\n"); return; } l->cur_size = l->cur_size - size; } } _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx); struct talloc_memlimit *orig_limit; struct talloc_memlimit *limit = NULL; if (tc->limit && tc->limit->parent == tc) { tc->limit->max_size = max_size; return 0; } orig_limit = tc->limit; limit = malloc(sizeof(struct talloc_memlimit)); if (limit == NULL) { return 1; } limit->parent = tc; limit->max_size = max_size; limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit); if (orig_limit) { limit->upper = orig_limit; } else { limit->upper = NULL; } return 0; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/talloc/talloc.h0000660000000000000000000017677000000000000016530 0ustar00rootroot00000000000000#ifndef _TALLOC_H_ #define _TALLOC_H_ /* Unix SMB/CIFS implementation. 
Samba temporary memory allocation functions Copyright (C) Andrew Tridgell 2004-2005 Copyright (C) Stefan Metzmacher 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #include #include #ifdef __cplusplus extern "C" { #endif /* for old gcc releases that don't have the feature test macro __has_attribute */ #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef _PUBLIC_ #if __has_attribute(visibility) #define _PUBLIC_ __attribute__((visibility("default"))) #else #define _PUBLIC_ #endif #endif /** * @defgroup talloc The talloc API * * talloc is a hierarchical, reference counted memory pool system with * destructors. It is the core memory allocator used in Samba. * * @{ */ #define TALLOC_VERSION_MAJOR 2 #define TALLOC_VERSION_MINOR 3 _PUBLIC_ int talloc_version_major(void); _PUBLIC_ int talloc_version_minor(void); /* This is mostly useful only for testing */ _PUBLIC_ int talloc_test_get_magic(void); /** * @brief Define a talloc parent type * * As talloc is a hierarchial memory allocator, every talloc chunk is a * potential parent to other talloc chunks. So defining a separate type for a * talloc chunk is not strictly necessary. TALLOC_CTX is defined nevertheless, * as it provides an indicator for function arguments. You will frequently * write code like * * @code * struct foo *foo_create(TALLOC_CTX *mem_ctx) * { * struct foo *result; * result = talloc(mem_ctx, struct foo); * if (result == NULL) return NULL; * ... initialize foo ... * return result; * } * @endcode * * In this type of allocating functions it is handy to have a general * TALLOC_CTX type to indicate which parent to put allocated structures on. */ typedef void TALLOC_CTX; /* this uses a little trick to allow __LINE__ to be stringified */ #ifndef __location__ #define __TALLOC_STRING_LINE1__(s) #s #define __TALLOC_STRING_LINE2__(s) __TALLOC_STRING_LINE1__(s) #define __TALLOC_STRING_LINE3__ __TALLOC_STRING_LINE2__(__LINE__) #define __location__ __FILE__ ":" __TALLOC_STRING_LINE3__ #endif #ifndef TALLOC_DEPRECATED #define TALLOC_DEPRECATED 0 #endif #ifndef PRINTF_ATTRIBUTE #if __has_attribute(format) || (__GNUC__ >= 3) /** Use gcc attribute to check printf fns. a1 is the 1-based index of * the parameter containing the format, and a2 the index of the first * argument. Note that some gcc 2.x versions don't handle this * properly **/ #define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2))) #else #define PRINTF_ATTRIBUTE(a1, a2) #endif #endif #ifndef _DEPRECATED_ #if __has_attribute(deprecated) || (__GNUC__ >= 3) #define _DEPRECATED_ __attribute__ ((deprecated)) #else #define _DEPRECATED_ #endif #endif #ifdef DOXYGEN /** * @brief Create a new talloc context. * * The talloc() macro is the core of the talloc library. 
It takes a memory * context and a type, and returns a pointer to a new area of memory of the * given type. * * The returned pointer is itself a talloc context, so you can use it as the * context argument to more calls to talloc if you wish. * * The returned pointer is a "child" of the supplied context. This means that if * you talloc_free() the context then the new child disappears as well. * Alternatively you can free just the child. * * @param[in] ctx A talloc context to create a new reference on or NULL to * create a new top level context. * * @param[in] type The type of memory to allocate. * * @return A type casted talloc context or NULL on error. * * @code * unsigned int *a, *b; * * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * @endcode * * @see talloc_zero * @see talloc_array * @see talloc_steal * @see talloc_free */ _PUBLIC_ void *talloc(const void *ctx, #type); #else #define talloc(ctx, type) (type *)talloc_named_const(ctx, sizeof(type), #type) _PUBLIC_ void *_talloc(const void *context, size_t size); #endif /** * @brief Create a new top level talloc context. * * This function creates a zero length named talloc context as a top level * context. It is equivalent to: * * @code * talloc_named(NULL, 0, fmt, ...); * @endcode * @param[in] fmt Format string for the name. * * @param[in] ... Additional printf-style arguments. * * @return The allocated memory chunk, NULL on error. * * @see talloc_named() */ _PUBLIC_ void *talloc_init(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2); #ifdef DOXYGEN /** * @brief Free a chunk of talloc memory. * * The talloc_free() function frees a piece of talloc memory, and all its * children. You can call talloc_free() on any pointer returned by * talloc(). * * The return value of talloc_free() indicates success or failure, with 0 * returned for success and -1 for failure. A possible failure condition * is if the pointer had a destructor attached to it and the destructor * returned -1. See talloc_set_destructor() for details on * destructors. Likewise, if "ptr" is NULL, then the function will make * no modifications and return -1. * * From version 2.0 and onwards, as a special case, talloc_free() is * refused on pointers that have more than one parent associated, as talloc * would have no way of knowing which parent should be removed. This is * different from older versions in the sense that always the reference to * the most recently established parent has been destroyed. Hence to free a * pointer that has more than one parent please use talloc_unlink(). * * To help you find problems in your code caused by this behaviour, if * you do try and free a pointer with more than one parent then the * talloc logging function will be called to give output like this: * * @code * ERROR: talloc_free with references at some_dir/source/foo.c:123 * reference at some_dir/source/other.c:325 * reference at some_dir/source/third.c:121 * @endcode * * Please see the documentation for talloc_set_log_fn() and * talloc_set_log_stderr() for more information on talloc logging * functions. * * If TALLOC_FREE_FILL environment variable is set, * the memory occupied by the context is filled with the value of this variable. * The value should be a numeric representation of the character you want to * use. * * talloc_free() operates recursively on its children. * * @param[in] ptr The chunk to be freed. * * @return Returns 0 on success and -1 on error. A possible * failure condition is if the pointer had a destructor * attached to it and the destructor returned -1. 
Likewise, * if "ptr" is NULL, then the function will make no * modifications and returns -1. * * Example: * @code * unsigned int *a, *b; * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * * talloc_free(a); // Frees a and b * @endcode * * @see talloc_set_destructor() * @see talloc_unlink() */ _PUBLIC_ int talloc_free(void *ptr); #else #define talloc_free(ctx) _talloc_free(ctx, __location__) _PUBLIC_ int _talloc_free(void *ptr, const char *location); #endif /** * @brief Free a talloc chunk's children. * * The function walks along the list of all children of a talloc context and * talloc_free()s only the children, not the context itself. * * A NULL argument is handled as no-op. * * @param[in] ptr The chunk that you want to free the children of * (NULL is allowed too) */ _PUBLIC_ void talloc_free_children(void *ptr); #ifdef DOXYGEN /** * @brief Assign a destructor function to be called when a chunk is freed. * * The function talloc_set_destructor() sets the "destructor" for the pointer * "ptr". A destructor is a function that is called when the memory used by a * pointer is about to be released. The destructor receives the pointer as an * argument, and should return 0 for success and -1 for failure. * * The destructor can do anything it wants to, including freeing other pieces * of memory. A common use for destructors is to clean up operating system * resources (such as open file descriptors) contained in the structure the * destructor is placed on. * * You can only place one destructor on a pointer. If you need more than one * destructor then you can create a zero-length child of the pointer and place * an additional destructor on that. * * To remove a destructor call talloc_set_destructor() with NULL for the * destructor. * * If your destructor attempts to talloc_free() the pointer that it is the * destructor for then talloc_free() will return -1 and the free will be * ignored. This would be a pointless operation anyway, as the destructor is * only called when the memory is just about to go away. * * @param[in] ptr The talloc chunk to add a destructor to. * * @param[in] destructor The destructor function to be called. NULL to remove * it. * * Example: * @code * static int destroy_fd(int *fd) { * close(*fd); * return 0; * } * * int *open_file(const char *filename) { * int *fd = talloc(NULL, int); * *fd = open(filename, O_RDONLY); * if (*fd < 0) { * talloc_free(fd); * return NULL; * } * // Whenever they free this, we close the file. * talloc_set_destructor(fd, destroy_fd); * return fd; * } * @endcode * * @see talloc() * @see talloc_free() */ _PUBLIC_ void talloc_set_destructor(const void *ptr, int (*destructor)(void *)); /** * @brief Change a talloc chunk's parent. * * The talloc_steal() function changes the parent context of a talloc * pointer. It is typically used when the context that the pointer is * currently a child of is going to be freed and you wish to keep the * memory for a longer time. * * To make the changed hierarchy less error-prone, you might consider to use * talloc_move(). * * If you try and call talloc_steal() on a pointer that has more than one * parent then the result is ambiguous. Talloc will choose to remove the * parent that is currently indicated by talloc_parent() and replace it with * the chosen parent. 
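 *
 * For instance (an illustrative sketch; a, b and c are ad-hoc contexts):
 *
 * @code
 * void *a = talloc_new(NULL);
 * void *b = talloc_new(NULL);
 * void *c = talloc_new(NULL);
 * int *p = talloc(a, int);
 * talloc_reference(b, p);   // p now has two parents, a and b
 * talloc_steal(c, p);       // ambiguous: only the talloc_parent() link moves to c
 * @endcode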
You will also get a message like this via the talloc * logging functions: * * @code * WARNING: talloc_steal with references at some_dir/source/foo.c:123 * reference at some_dir/source/other.c:325 * reference at some_dir/source/third.c:121 * @endcode * * To unambiguously change the parent of a pointer please see the function * talloc_reparent(). See the talloc_set_log_fn() documentation for more * information on talloc logging. * * @param[in] new_ctx The new parent context. * * @param[in] ptr The talloc chunk to move. * * @return Returns the pointer that you pass it. It does not have * any failure modes. * * @note It is possible to produce loops in the parent/child relationship * if you are not careful with talloc_steal(). No guarantees are provided * as to your sanity or the safety of your data if you do this. */ _PUBLIC_ void *talloc_steal(const void *new_ctx, const void *ptr); #else /* DOXYGEN */ /* try to make talloc_set_destructor() and talloc_steal() type safe, if we have a recent gcc */ #if (__GNUC__ >= 3) #define _TALLOC_TYPEOF(ptr) __typeof__(ptr) #define talloc_set_destructor(ptr, function) \ do { \ int (*_talloc_destructor_fn)(_TALLOC_TYPEOF(ptr)) = (function); \ _talloc_set_destructor((ptr), (int (*)(void *))_talloc_destructor_fn); \ } while(0) /* this extremely strange macro is to avoid some braindamaged warning stupidity in gcc 4.1.x */ #define talloc_steal(ctx, ptr) ({ _TALLOC_TYPEOF(ptr) __talloc_steal_ret = (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__); __talloc_steal_ret; }) #else /* __GNUC__ >= 3 */ #define talloc_set_destructor(ptr, function) \ _talloc_set_destructor((ptr), (int (*)(void *))(function)) #define _TALLOC_TYPEOF(ptr) void * #define talloc_steal(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__) #endif /* __GNUC__ >= 3 */ _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*_destructor)(void *)); _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location); #endif /* DOXYGEN */ /** * @brief Assign a name to a talloc chunk. * * Each talloc pointer has a "name". The name is used principally for * debugging purposes, although it is also possible to set and get the name on * a pointer in as a way of "marking" pointers in your code. * * The main use for names on pointer is for "talloc reports". See * talloc_report() and talloc_report_full() for details. Also see * talloc_enable_leak_report() and talloc_enable_leak_report_full(). * * The talloc_set_name() function allocates memory as a child of the * pointer. It is logically equivalent to: * * @code * talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...)); * @endcode * * @param[in] ptr The talloc chunk to assign a name to. * * @param[in] fmt Format string for the name. * * @param[in] ... Add printf-style additional arguments. * * @return The assigned name, NULL on error. * * @note Multiple calls to talloc_set_name() will allocate more memory without * releasing the name. All of the memory is released when the ptr is freed * using talloc_free(). */ _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); #ifdef DOXYGEN /** * @brief Change a talloc chunk's parent. * * This function has the same effect as talloc_steal(), and additionally sets * the source pointer to NULL. You would use it like this: * * @code * struct foo *X = talloc(tmp_ctx, struct foo); * struct foo *Y; * Y = talloc_move(new_ctx, &X); * @endcode * * @param[in] new_ctx The new parent context. 
* * @param[in] pptr Pointer to a pointer to the talloc chunk to move. * * @return The pointer to the talloc chunk that moved. * It does not have any failure modes. * */ _PUBLIC_ void *talloc_move(const void *new_ctx, void **pptr); #else #define talloc_move(ctx, pptr) (_TALLOC_TYPEOF(*(pptr)))_talloc_move((ctx),(void *)(pptr)) _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *pptr); #endif /** * @brief Assign a name to a talloc chunk. * * The function is just like talloc_set_name(), but it takes a string constant, * and is much faster. It is extensively used by the "auto naming" macros, such * as talloc_p(). * * This function does not allocate any memory. It just copies the supplied * pointer into the internal representation of the talloc ptr. This means you * must not pass a name pointer to memory that will disappear before the ptr * is freed with talloc_free(). * * @param[in] ptr The talloc chunk to assign a name to. * * @param[in] name Format string for the name. */ _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name); /** * @brief Create a named talloc chunk. * * The talloc_named() function creates a named talloc pointer. It is * equivalent to: * * @code * ptr = talloc_size(context, size); * talloc_set_name(ptr, fmt, ....); * @endcode * * @param[in] context The talloc context to hang the result off. * * @param[in] size Number of char's that you want to allocate. * * @param[in] fmt Format string for the name. * * @param[in] ... Additional printf-style arguments. * * @return The allocated memory chunk, NULL on error. * * @see talloc_set_name() */ _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...) PRINTF_ATTRIBUTE(3,4); /** * @brief Basic routine to allocate a chunk of memory. * * This is equivalent to: * * @code * ptr = talloc_size(context, size); * talloc_set_name_const(ptr, name); * @endcode * * @param[in] context The parent context. * * @param[in] size The number of char's that we want to allocate. * * @param[in] name The name the talloc block has. * * @return The allocated memory chunk, NULL on error. */ _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name); #ifdef DOXYGEN /** * @brief Untyped allocation. * * The function should be used when you don't have a convenient type to pass to * talloc(). Unlike talloc(), it is not type safe (as it returns a void *), so * you are on your own for type checking. * * Best to use talloc() or talloc_array() instead. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] size Number of char's that you want to allocate. * * @return The allocated memory chunk, NULL on error. * * Example: * @code * void *mem = talloc_size(NULL, 100); * @endcode */ _PUBLIC_ void *talloc_size(const void *ctx, size_t size); #else #define talloc_size(ctx, size) talloc_named_const(ctx, size, __location__) #endif #ifdef DOXYGEN /** * @brief Allocate into a typed pointer. * * The talloc_ptrtype() macro should be used when you have a pointer and want * to allocate memory to point at with this pointer. When compiling with * gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size() and * talloc_get_name() will return the current location in the source file and * not the type. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The pointer you want to assign the result to. * * @return The properly casted allocated memory chunk, NULL on * error. 
* * Example: * @code * unsigned int *a = talloc_ptrtype(NULL, a); * @endcode */ _PUBLIC_ void *talloc_ptrtype(const void *ctx, #type); #else #define talloc_ptrtype(ctx, ptr) (_TALLOC_TYPEOF(ptr))talloc_size(ctx, sizeof(*(ptr))) #endif #ifdef DOXYGEN /** * @brief Allocate a new 0-sized talloc chunk. * * This is a utility macro that creates a new memory context hanging off an * existing context, automatically naming it "talloc_new: __location__" where * __location__ is the source line it is called from. It is particularly * useful for creating a new temporary working context. * * @param[in] ctx The talloc parent context. * * @return A new talloc chunk, NULL on error. */ _PUBLIC_ void *talloc_new(const void *ctx); #else #define talloc_new(ctx) talloc_named_const(ctx, 0, "talloc_new: " __location__) #endif #ifdef DOXYGEN /** * @brief Allocate a 0-initizialized structure. * * The macro is equivalent to: * * @code * ptr = talloc(ctx, type); * if (ptr) memset(ptr, 0, sizeof(type)); * @endcode * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @return Pointer to a piece of memory, properly cast to 'type *', * NULL on error. * * Example: * @code * unsigned int *a, *b; * a = talloc_zero(NULL, unsigned int); * b = talloc_zero(a, unsigned int); * @endcode * * @see talloc() * @see talloc_zero_size() * @see talloc_zero_array() */ _PUBLIC_ void *talloc_zero(const void *ctx, #type); /** * @brief Allocate untyped, 0-initialized memory. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] size Number of char's that you want to allocate. * * @return The allocated memory chunk. */ _PUBLIC_ void *talloc_zero_size(const void *ctx, size_t size); #else #define talloc_zero(ctx, type) (type *)_talloc_zero(ctx, sizeof(type), #type) #define talloc_zero_size(ctx, size) _talloc_zero(ctx, size, __location__) _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name); #endif /** * @brief Return the name of a talloc chunk. * * @param[in] ptr The talloc chunk. * * @return The current name for the given talloc pointer. * * @see talloc_set_name() */ _PUBLIC_ const char *talloc_get_name(const void *ptr); /** * @brief Verify that a talloc chunk carries a specified name. * * This function checks if a pointer has the specified name. If it does * then the pointer is returned. * * @param[in] ptr The talloc chunk to check. * * @param[in] name The name to check against. * * @return The pointer if the name matches, NULL if it doesn't. */ _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name); /** * @brief Get the parent chunk of a pointer. * * @param[in] ptr The talloc pointer to inspect. * * @return The talloc parent of ptr, NULL on error. */ _PUBLIC_ void *talloc_parent(const void *ptr); /** * @brief Get a talloc chunk's parent name. * * @param[in] ptr The talloc pointer to inspect. * * @return The name of ptr's parent chunk. */ _PUBLIC_ const char *talloc_parent_name(const void *ptr); /** * @brief Get the total size of a talloc chunk including its children. * * The function returns the total size in bytes used by this pointer and all * child pointers. Mostly useful for debugging. * * Passing NULL is allowed, but it will only give a meaningful result if * talloc_enable_leak_report() or talloc_enable_leak_report_full() has * been called. * * @param[in] ptr The talloc chunk. * * @return The total size. 
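 *
 * Example (an illustrative sketch; "struct foo" and the child string are
 * arbitrary):
 * @code
 * struct foo *x = talloc(NULL, struct foo);
 * talloc_strdup(x, "child");
 * printf("%zu bytes\n", talloc_total_size(x)); // x plus its child
 * @endcode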
*/ _PUBLIC_ size_t talloc_total_size(const void *ptr); /** * @brief Get the number of talloc chunks hanging off a chunk. * * The talloc_total_blocks() function returns the total memory block * count used by this pointer and all child pointers. Mostly useful for * debugging. * * Passing NULL is allowed, but it will only give a meaningful result if * talloc_enable_leak_report() or talloc_enable_leak_report_full() has * been called. * * @param[in] ptr The talloc chunk. * * @return The total size. */ _PUBLIC_ size_t talloc_total_blocks(const void *ptr); #ifdef DOXYGEN /** * @brief Duplicate a memory area into a talloc chunk. * * The function is equivalent to: * * @code * ptr = talloc_size(ctx, size); * if (ptr) memcpy(ptr, p, size); * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] p The memory chunk you want to duplicate. * * @param[in] size Number of char's that you want copy. * * @return The allocated memory chunk. * * @see talloc_size() */ _PUBLIC_ void *talloc_memdup(const void *t, const void *p, size_t size); #else #define talloc_memdup(t, p, size) _talloc_memdup(t, p, size, __location__) _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name); #endif #ifdef DOXYGEN /** * @brief Assign a type to a talloc chunk. * * This macro allows you to force the name of a pointer to be of a particular * type. This can be used in conjunction with talloc_get_type() to do type * checking on void* pointers. * * It is equivalent to this: * * @code * talloc_set_name_const(ptr, #type) * @endcode * * @param[in] ptr The talloc chunk to assign the type to. * * @param[in] type The type to assign. */ _PUBLIC_ void talloc_set_type(const char *ptr, #type); /** * @brief Get a typed pointer out of a talloc pointer. * * This macro allows you to do type checking on talloc pointers. It is * particularly useful for void* private pointers. It is equivalent to * this: * * @code * (type *)talloc_check_name(ptr, #type) * @endcode * * @param[in] ptr The talloc pointer to check. * * @param[in] type The type to check against. * * @return The properly casted pointer given by ptr, NULL on error. */ type *talloc_get_type(const void *ptr, #type); #else #define talloc_set_type(ptr, type) talloc_set_name_const(ptr, #type) #define talloc_get_type(ptr, type) (type *)talloc_check_name(ptr, #type) #endif #ifdef DOXYGEN /** * @brief Safely turn a void pointer into a typed pointer. * * This macro is used together with talloc(mem_ctx, struct foo). If you had to * assign the talloc chunk pointer to some void pointer variable, * talloc_get_type_abort() is the recommended way to get the convert the void * pointer back to a typed pointer. * * @param[in] ptr The void pointer to convert. * * @param[in] type The type that this chunk contains * * @return The same value as ptr, type-checked and properly cast. */ _PUBLIC_ void *talloc_get_type_abort(const void *ptr, #type); #else #ifdef TALLOC_GET_TYPE_ABORT_NOOP #define talloc_get_type_abort(ptr, type) (type *)(ptr) #else #define talloc_get_type_abort(ptr, type) (type *)_talloc_get_type_abort(ptr, #type, __location__) #endif _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location); #endif /** * @brief Find a parent context by name. * * Find a parent memory context of the current context that has the given * name. 
This can be very useful in complex programs where it may be * difficult to pass all information down to the level you need, but you * know the structure you want is a parent of another context. * * @param[in] ctx The talloc chunk to start from. * * @param[in] name The name of the parent we look for. * * @return The memory context we are looking for, NULL if not * found. */ _PUBLIC_ void *talloc_find_parent_byname(const void *ctx, const char *name); #ifdef DOXYGEN /** * @brief Find a parent context by type. * * Find a parent memory context of the current context that has the given * name. This can be very useful in complex programs where it may be * difficult to pass all information down to the level you need, but you * know the structure you want is a parent of another context. * * Like talloc_find_parent_byname() but takes a type, making it typesafe. * * @param[in] ptr The talloc chunk to start from. * * @param[in] type The type of the parent to look for. * * @return The memory context we are looking for, NULL if not * found. */ _PUBLIC_ void *talloc_find_parent_bytype(const void *ptr, #type); #else #define talloc_find_parent_bytype(ptr, type) (type *)talloc_find_parent_byname(ptr, #type) #endif /** * @brief Allocate a talloc pool. * * A talloc pool is a pure optimization for specific situations. In the * release process for Samba 3.2 we found out that we had become considerably * slower than Samba 3.0 was. Profiling showed that malloc(3) was a large CPU * consumer in benchmarks. For Samba 3.2 we have internally converted many * static buffers to dynamically allocated ones, so malloc(3) being beaten * more was no surprise. But it made us slower. * * talloc_pool() is an optimization to call malloc(3) a lot less for the use * pattern Samba has: The SMB protocol is mainly a request/response protocol * where we have to allocate a certain amount of memory per request and free * that after the SMB reply is sent to the client. * * talloc_pool() creates a talloc chunk that you can use as a talloc parent * exactly as you would use any other ::TALLOC_CTX. The difference is that * when you talloc a child of this pool, no malloc(3) is done. Instead, talloc * just increments a pointer inside the talloc_pool. This also works * recursively. If you use the child of the talloc pool as a parent for * grand-children, their memory is also taken from the talloc pool. * * If there is not enough memory in the pool to allocate the new child, * it will create a new talloc chunk as if the parent was a normal talloc * context. * * If you talloc_free() children of a talloc pool, the memory is not given * back to the system. Instead, free(3) is only called if the talloc_pool() * itself is released with talloc_free(). * * The downside of a talloc pool is that if you talloc_move() a child of a * talloc pool to a talloc parent outside the pool, the whole pool memory is * not free(3)'ed until that moved chunk is also talloc_free()ed. * * @param[in] context The talloc context to hang the result off. * * @param[in] size Size of the talloc pool. * * @return The allocated talloc pool, NULL on error. */ _PUBLIC_ void *talloc_pool(const void *context, size_t size); #ifdef DOXYGEN /** * @brief Allocate a talloc object as/with an additional pool. * * This is like talloc_pool(), but's it's more flexible * and allows an object to be a pool for its children. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. 
* * @param[in] num_subobjects The expected number of subobjects, which will * be allocated within the pool. This allocates * space for talloc_chunk headers. * * @param[in] total_subobjects_size The size that all subobjects can use in total. * * * @return The allocated talloc object, NULL on error. */ _PUBLIC_ void *talloc_pooled_object(const void *ctx, #type, unsigned num_subobjects, size_t total_subobjects_size); #else #define talloc_pooled_object(_ctx, _type, \ _num_subobjects, \ _total_subobjects_size) \ (_type *)_talloc_pooled_object((_ctx), sizeof(_type), #_type, \ (_num_subobjects), \ (_total_subobjects_size)) _PUBLIC_ void *_talloc_pooled_object(const void *ctx, size_t type_size, const char *type_name, unsigned num_subobjects, size_t total_subobjects_size); #endif /** * @brief Free a talloc chunk and NULL out the pointer. * * TALLOC_FREE() frees a pointer and sets it to NULL. Use this if you want * immediate feedback (i.e. crash) if you use a pointer after having free'ed * it. * * @param[in] ctx The chunk to be freed. */ #define TALLOC_FREE(ctx) do { if (ctx != NULL) { talloc_free(ctx); ctx=NULL; } } while(0) /* @} ******************************************************************/ /** * \defgroup talloc_ref The talloc reference function. * @ingroup talloc * * This module contains the definitions around talloc references * * @{ */ /** * @brief Increase the reference count of a talloc chunk. * * The talloc_increase_ref_count(ptr) function is exactly equivalent to: * * @code * talloc_reference(NULL, ptr); * @endcode * * You can use either syntax, depending on which you think is clearer in * your code. * * @param[in] ptr The pointer to increase the reference count. * * @return 0 on success, -1 on error. */ _PUBLIC_ int talloc_increase_ref_count(const void *ptr); /** * @brief Get the number of references to a talloc chunk. * * @param[in] ptr The pointer to retrieve the reference count from. * * @return The number of references. */ _PUBLIC_ size_t talloc_reference_count(const void *ptr); #ifdef DOXYGEN /** * @brief Create an additional talloc parent to a pointer. * * The talloc_reference() function makes "context" an additional parent of * ptr. Each additional reference consumes around 48 bytes of memory on intel * x86 platforms. * * If ptr is NULL, then the function is a no-op, and simply returns NULL. * * After creating a reference you can free it in one of the following ways: * * - you can talloc_free() any parent of the original pointer. That * will reduce the number of parents of this pointer by 1, and will * cause this pointer to be freed if it runs out of parents. * * - you can talloc_free() the pointer itself if it has at maximum one * parent. This behaviour has been changed since the release of version * 2.0. Further information in the description of "talloc_free". * * For more control on which parent to remove, see talloc_unlink() * @param[in] ctx The additional parent. * * @param[in] ptr The pointer you want to create an additional parent for. * * @return The original pointer 'ptr', NULL if talloc ran out of * memory in creating the reference. * * @warning You should try to avoid using this interface. It turns a beautiful * talloc-tree into a graph. It is often really hard to debug if you * screw something up by accident. * * Example: * @code * unsigned int *a, *b, *c; * a = talloc(NULL, unsigned int); * b = talloc(NULL, unsigned int); * c = talloc(a, unsigned int); * // b also serves as a parent of c. 
* talloc_reference(b, c); * @endcode * * @see talloc_unlink() */ _PUBLIC_ void *talloc_reference(const void *ctx, const void *ptr); #else #define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference_loc((ctx),(ptr), __location__) _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location); #endif /** * @brief Remove a specific parent from a talloc chunk. * * The function removes a specific parent from ptr. The context passed must * either be a context used in talloc_reference() with this pointer, or must be * a direct parent of ptr. * * You can just use talloc_free() instead of talloc_unlink() if there * is at maximum one parent. This behaviour has been changed since the * release of version 2.0. Further information in the description of * "talloc_free". * * @param[in] context The talloc parent to remove. * * @param[in] ptr The talloc ptr you want to remove the parent from. * * @return 0 on success, -1 on error. * * @note If the parent has already been removed using talloc_free() then * this function will fail and will return -1. Likewise, if ptr is NULL, * then the function will make no modifications and return -1. * * @warning You should try to avoid using this interface. It turns a beautiful * talloc-tree into a graph. It is often really hard to debug if you * screw something up by accident. * * Example: * @code * unsigned int *a, *b, *c; * a = talloc(NULL, unsigned int); * b = talloc(NULL, unsigned int); * c = talloc(a, unsigned int); * // b also serves as a parent of c. * talloc_reference(b, c); * talloc_unlink(b, c); * @endcode */ _PUBLIC_ int talloc_unlink(const void *context, void *ptr); /** * @brief Provide a talloc context that is freed at program exit. * * This is a handy utility function that returns a talloc context * which will be automatically freed on program exit. This can be used * to reduce the noise in memory leak reports. * * Never use this in code that might be used in objects loaded with * dlopen and unloaded with dlclose. talloc_autofree_context() * internally uses atexit(3). Some platforms like modern Linux handles * this fine, but for example FreeBSD does not deal well with dlopen() * and atexit() used simultaneously: dlclose() does not clean up the * list of atexit-handlers, so when the program exits the code that * was registered from within talloc_autofree_context() is gone, the * program crashes at exit. * * @return A talloc context, NULL on error. */ _PUBLIC_ void *talloc_autofree_context(void) _DEPRECATED_; /** * @brief Get the size of a talloc chunk. * * This function lets you know the amount of memory allocated so far by * this context. It does NOT account for subcontext memory. * This can be used to calculate the size of an array. * * @param[in] ctx The talloc chunk. * * @return The size of the talloc chunk. */ _PUBLIC_ size_t talloc_get_size(const void *ctx); /** * @brief Show the parentage of a context. * * @param[in] context The talloc context to look at. * * @param[in] file The output to use, a file, stdout or stderr. */ _PUBLIC_ void talloc_show_parents(const void *context, FILE *file); /** * @brief Check if a context is parent of a talloc chunk. * * This checks if context is referenced in the talloc hierarchy above ptr. * * @param[in] context The assumed talloc context. * * @param[in] ptr The talloc chunk to check. * * @return Return 1 if this is the case, 0 if not. */ _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr); /** * @brief Change the parent context of a talloc pointer. 
* * The function changes the parent context of a talloc pointer. It is typically * used when the context that the pointer is currently a child of is going to be * freed and you wish to keep the memory for a longer time. * * The difference between talloc_reparent() and talloc_steal() is that * talloc_reparent() can specify which parent you wish to change. This is * useful when a pointer has multiple parents via references. * * @param[in] old_parent * @param[in] new_parent * @param[in] ptr * * @return Return the pointer you passed. It does not have any * failure modes. */ _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr); /* @} ******************************************************************/ /** * @defgroup talloc_array The talloc array functions * @ingroup talloc * * Talloc contains some handy helpers for handling Arrays conveniently * * @{ */ #ifdef DOXYGEN /** * @brief Allocate an array. * * The macro is equivalent to: * * @code * (type *)talloc_size(ctx, sizeof(type) * count); * @endcode * * except that it provides integer overflow protection for the multiply, * returning NULL if the multiply overflows. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @param[in] count The number of 'type' elements you want to allocate. * * @return The allocated result, properly cast to 'type *', NULL on * error. * * Example: * @code * unsigned int *a, *b; * a = talloc_zero(NULL, unsigned int); * b = talloc_array(a, unsigned int, 100); * @endcode * * @see talloc() * @see talloc_zero_array() */ _PUBLIC_ void *talloc_array(const void *ctx, #type, unsigned count); #else #define talloc_array(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type) _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name); #endif #ifdef DOXYGEN /** * @brief Allocate an array. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] size The size of an array element. * * @param[in] count The number of elements you want to allocate. * * @return The allocated result, NULL on error. */ _PUBLIC_ void *talloc_array_size(const void *ctx, size_t size, unsigned count); #else #define talloc_array_size(ctx, size, count) _talloc_array(ctx, size, count, __location__) #endif #ifdef DOXYGEN /** * @brief Allocate an array into a typed pointer. * * The macro should be used when you have a pointer to an array and want to * allocate memory of an array to point at with this pointer. When compiling * with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_array_size() * and talloc_get_name() will return the current location in the source file * and not the type. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] ptr The pointer you want to assign the result to. * * @param[in] count The number of elements you want to allocate. * * @return The allocated memory chunk, properly casted. NULL on * error. */ void *talloc_array_ptrtype(const void *ctx, const void *ptr, unsigned count); #else #define talloc_array_ptrtype(ctx, ptr, count) (_TALLOC_TYPEOF(ptr))talloc_array_size(ctx, sizeof(*(ptr)), count) #endif #ifdef DOXYGEN /** * @brief Get the number of elements in a talloc'ed array. * * A talloc chunk carries its own size, so for talloc'ed arrays it is not * necessary to store the number of elements explicitly. * * @param[in] ctx The allocated array. * * @return The number of elements in ctx. 
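 *
 * Example (an illustrative sketch; the variable names are arbitrary):
 * @code
 * int *vals = talloc_array(NULL, int, 10);
 * size_t n = talloc_array_length(vals); // n == 10
 * @endcode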
*/ size_t talloc_array_length(const void *ctx); #else #define talloc_array_length(ctx) (talloc_get_size(ctx)/sizeof(*ctx)) #endif #ifdef DOXYGEN /** * @brief Allocate a zero-initialized array * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @param[in] count The number of "type" elements you want to allocate. * * @return The allocated result casted to "type *", NULL on error. * * The talloc_zero_array() macro is equivalent to: * * @code * ptr = talloc_array(ctx, type, count); * if (ptr) memset(ptr, 0, sizeof(type) * count); * @endcode */ void *talloc_zero_array(const void *ctx, #type, unsigned count); #else #define talloc_zero_array(ctx, type, count) (type *)_talloc_zero_array(ctx, sizeof(type), count, #type) _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name); #endif #ifdef DOXYGEN /** * @brief Change the size of a talloc array. * * The macro changes the size of a talloc pointer. The 'count' argument is the * number of elements of type 'type' that you want the resulting pointer to * hold. * * talloc_realloc() has the following equivalences: * * @code * talloc_realloc(ctx, NULL, type, 1) ==> talloc(ctx, type); * talloc_realloc(ctx, NULL, type, N) ==> talloc_array(ctx, type, N); * talloc_realloc(ctx, ptr, type, 0) ==> talloc_free(ptr); * @endcode * * The "context" argument is only used if "ptr" is NULL, otherwise it is * ignored. * * @param[in] ctx The parent context used if ptr is NULL. * * @param[in] ptr The chunk to be resized. * * @param[in] type The type of the array element inside ptr. * * @param[in] count The intended number of array elements. * * @return The new array, NULL on error. The call will fail either * due to a lack of memory, or because the pointer has more * than one parent (see talloc_reference()). */ _PUBLIC_ void *talloc_realloc(const void *ctx, void *ptr, #type, size_t count); #else #define talloc_realloc(ctx, p, type, count) (type *)_talloc_realloc_array(ctx, p, sizeof(type), count, #type) _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name); #endif #ifdef DOXYGEN /** * @brief Untyped realloc to change the size of a talloc array. * * The macro is useful when the type is not known so the typesafe * talloc_realloc() cannot be used. * * @param[in] ctx The parent context used if 'ptr' is NULL. * * @param[in] ptr The chunk to be resized. * * @param[in] size The new chunk size. * * @return The new array, NULL on error. */ void *talloc_realloc_size(const void *ctx, void *ptr, size_t size); #else #define talloc_realloc_size(ctx, ptr, size) _talloc_realloc(ctx, ptr, size, __location__) _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name); #endif /** * @brief Provide a function version of talloc_realloc_size. * * This is a non-macro version of talloc_realloc(), which is useful as * libraries sometimes want a ralloc function pointer. A realloc() * implementation encapsulates the functionality of malloc(), free() and * realloc() in one call, which is why it is useful to be able to pass around * a single function pointer. * * @param[in] context The parent context used if ptr is NULL. * * @param[in] ptr The chunk to be resized. * * @param[in] size The new chunk size. * * @return The new chunk, NULL on error. 
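 *
 * A possible sketch of handing it around as a callback (the pointer variable
 * is illustrative only):
 * @code
 * void *(*my_realloc)(const void *, void *, size_t) = talloc_realloc_fn;
 * void *buf = my_realloc(NULL, NULL, 100); // allocates a new 100 byte chunk
 * @endcode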
*/ _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size); /* @} ******************************************************************/ /** * @defgroup talloc_string The talloc string functions. * @ingroup talloc * * talloc string allocation and manipulation functions. * @{ */ /** * @brief Duplicate a string into a talloc chunk. * * This function is equivalent to: * * @code * ptr = talloc_size(ctx, strlen(p)+1); * if (ptr) memcpy(ptr, p, strlen(p)+1); * @endcode * * This functions sets the name of the new pointer to the passed * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] p The string you want to duplicate. * * @return The duplicated string, NULL on error. */ _PUBLIC_ char *talloc_strdup(const void *t, const char *p); /** * @brief Append a string to given string. * * The destination string is reallocated to take * strlen(s) + strlen(a) + 1 characters. * * This functions sets the name of the new pointer to the new * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination to append to. * * @param[in] a The string you want to append. * * @return The concatenated strings, NULL on error. * * @see talloc_strdup() * @see talloc_strdup_append_buffer() */ _PUBLIC_ char *talloc_strdup_append(char *s, const char *a); /** * @brief Append a string to a given buffer. * * This is a more efficient version of talloc_strdup_append(). It determines the * length of the destination string by the size of the talloc context. * * Use this very carefully as it produces a different result than * talloc_strdup_append() when a zero character is in the middle of the * destination string. * * @code * char *str_a = talloc_strdup(NULL, "hello world"); * char *str_b = talloc_strdup(NULL, "hello world"); * str_a[5] = str_b[5] = '\0' * * char *app = talloc_strdup_append(str_a, ", hello"); * char *buf = talloc_strdup_append_buffer(str_b, ", hello"); * * printf("%s\n", app); // hello, hello (app = "hello, hello") * printf("%s\n", buf); // hello (buf = "hello\0world, hello") * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination buffer to append to. * * @param[in] a The string you want to append. * * @return The concatenated strings, NULL on error. * * @see talloc_strdup() * @see talloc_strdup_append() * @see talloc_array_length() */ _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a); /** * @brief Duplicate a length-limited string into a talloc chunk. * * This function is the talloc equivalent of the C library function strndup(3). * * This functions sets the name of the new pointer to the passed string. This is * equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] p The string you want to duplicate. * * @param[in] n The maximum string length to duplicate. * * @return The duplicated string, NULL on error. */ _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n); /** * @brief Append at most n characters of a string to given string. * * The destination string is reallocated to take * strlen(s) + strnlen(a, n) + 1 characters. * * This functions sets the name of the new pointer to the new * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * If s == NULL then new context is created. 
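 *
 * A possible usage sketch (the strings are illustrative only):
 * @code
 * char *s = talloc_strdup(NULL, "foo");
 * s = talloc_strndup_append(s, "barbaz", 3); // s is now "foobar"
 * @endcode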
* * @param[in] s The destination string to append to. * * @param[in] a The source string you want to append. * * @param[in] n The number of characters you want to append from the * string. * * @return The concatenated strings, NULL on error. * * @see talloc_strndup() * @see talloc_strndup_append_buffer() */ _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n); /** * @brief Append at most n characters of a string to given buffer * * This is a more efficient version of talloc_strndup_append(). It determines * the length of the destination string by the size of the talloc context. * * Use this very carefully as it produces a different result than * talloc_strndup_append() when a zero character is in the middle of the * destination string. * * @code * char *str_a = talloc_strdup(NULL, "hello world"); * char *str_b = talloc_strdup(NULL, "hello world"); * str_a[5] = str_b[5] = '\0' * * char *app = talloc_strndup_append(str_a, ", hello", 7); * char *buf = talloc_strndup_append_buffer(str_b, ", hello", 7); * * printf("%s\n", app); // hello, hello (app = "hello, hello") * printf("%s\n", buf); // hello (buf = "hello\0world, hello") * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination buffer to append to. * * @param[in] a The source string you want to append. * * @param[in] n The number of characters you want to append from the * string. * * @return The concatenated strings, NULL on error. * * @see talloc_strndup() * @see talloc_strndup_append() * @see talloc_array_length() */ _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n); /** * @brief Format a string given a va_list. * * This function is the talloc equivalent of the C library function * vasprintf(3). * * This functions sets the name of the new pointer to the new string. This is * equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] fmt The format string. * * @param[in] ap The parameters used to fill fmt. * * @return The formatted string, NULL on error. */ _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); /** * @brief Format a string given a va_list and append it to the given destination * string. * * @param[in] s The destination string to append to. * * @param[in] fmt The format string. * * @param[in] ap The parameters used to fill fmt. * * @return The formatted string, NULL on error. * * @see talloc_vasprintf() */ _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); /** * @brief Format a string given a va_list and append it to the given destination * buffer. * * @param[in] s The destination buffer to append to. * * @param[in] fmt The format string. * * @param[in] ap The parameters used to fill fmt. * * @return The formatted string, NULL on error. * * @see talloc_vasprintf() */ _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); /** * @brief Format a string. * * This function is the talloc equivalent of the C library function asprintf(3). * * This functions sets the name of the new pointer to the new string. This is * equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] fmt The format string. * * @param[in] ... The parameters used to fill fmt. * * @return The formatted string, NULL on error. 
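 *
 * Example (a sketch; "ctx" stands for any talloc context and the values are
 * illustrative):
 * @code
 * char *msg = talloc_asprintf(ctx, "%s #%d", "request", 42);
 * @endcode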
*/ _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); /** * @brief Append a formatted string to another string. * * This function appends the given formatted string to the given string. Use * this variant when the string in the current talloc buffer may have been * truncated in length. * * This functions sets the name of the new pointer to the new * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * If s == NULL then new context is created. * * @param[in] s The string to append to. * * @param[in] fmt The format string. * * @param[in] ... The parameters used to fill fmt. * * @return The formatted string, NULL on error. */ _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); /** * @brief Append a formatted string to another string. * * This is a more efficient version of talloc_asprintf_append(). It determines * the length of the destination string by the size of the talloc context. * * Use this very carefully as it produces a different result than * talloc_asprintf_append() when a zero character is in the middle of the * destination string. * * @code * char *str_a = talloc_strdup(NULL, "hello world"); * char *str_b = talloc_strdup(NULL, "hello world"); * str_a[5] = str_b[5] = '\0' * * char *app = talloc_asprintf_append(str_a, "%s", ", hello"); * char *buf = talloc_strdup_append_buffer(str_b, "%s", ", hello"); * * printf("%s\n", app); // hello, hello (app = "hello, hello") * printf("%s\n", buf); // hello (buf = "hello\0world, hello") * @endcode * * If s == NULL then new context is created. * * @param[in] s The string to append to * * @param[in] fmt The format string. * * @param[in] ... The parameters used to fill fmt. * * @return The formatted string, NULL on error. * * @see talloc_asprintf() * @see talloc_asprintf_append() */ _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); /* @} ******************************************************************/ /** * @defgroup talloc_debug The talloc debugging support functions * @ingroup talloc * * To aid memory debugging, talloc contains routines to inspect the currently * allocated memory hierarchy. * * @{ */ /** * @brief Walk a complete talloc hierarchy. * * This provides a more flexible reports than talloc_report(). It * will recursively call the callback for the entire tree of memory * referenced by the pointer. References in the tree are passed with * is_ref = 1 and the pointer that is referenced. * * You can pass NULL for the pointer, in which case a report is * printed for the top level memory context, but only if * talloc_enable_leak_report() or talloc_enable_leak_report_full() * has been called. * * The recursion is stopped when depth >= max_depth. * max_depth = -1 means only stop at leaf nodes. * * @param[in] ptr The talloc chunk. * * @param[in] depth Internal parameter to control recursion. Call with 0. * * @param[in] max_depth Maximum recursion level. * * @param[in] callback Function to be called on every chunk. * * @param[in] private_data Private pointer passed to callback. */ _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth, void (*callback)(const void *ptr, int depth, int max_depth, int is_ref, void *private_data), void *private_data); /** * @brief Print a talloc hierarchy. * * This provides a more flexible reports than talloc_report(). It * will let you specify the depth and max_depth. * * @param[in] ptr The talloc chunk. 
* * @param[in] depth Internal parameter to control recursion. Call with 0. * * @param[in] max_depth Maximum recursion level. * * @param[in] f The file handle to print to. */ _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f); /** * @brief Print a summary report of all memory used by ptr. * * This provides a more detailed report than talloc_report(). It will * recursively print the entire tree of memory referenced by the * pointer. References in the tree are shown by giving the name of the * pointer that is referenced. * * You can pass NULL for the pointer, in which case a report is printed * for the top level memory context, but only if * talloc_enable_leak_report() or talloc_enable_leak_report_full() has * been called. * * @param[in] ptr The talloc chunk. * * @param[in] f The file handle to print to. * * Example: * @code * unsigned int *a, *b; * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * fprintf(stderr, "Dumping memory tree for a:\n"); * talloc_report_full(a, stderr); * @endcode * * @see talloc_report() */ _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f); /** * @brief Print a summary report of all memory used by ptr. * * This function prints a summary report of all memory used by ptr. One line of * report is printed for each immediate child of ptr, showing the total memory * and number of blocks used by that child. * * You can pass NULL for the pointer, in which case a report is printed * for the top level memory context, but only if talloc_enable_leak_report() * or talloc_enable_leak_report_full() has been called. * * @param[in] ptr The talloc chunk. * * @param[in] f The file handle to print to. * * Example: * @code * unsigned int *a, *b; * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * fprintf(stderr, "Summary of memory tree for a:\n"); * talloc_report(a, stderr); * @endcode * * @see talloc_report_full() */ _PUBLIC_ void talloc_report(const void *ptr, FILE *f); /** * @brief Enable tracking the use of NULL memory contexts. * * This enables tracking of the NULL memory context without enabling leak * reporting on exit. Useful for when you want to do your own leak * reporting call via talloc_report_null_full(); */ _PUBLIC_ void talloc_enable_null_tracking(void); /** * @brief Enable tracking the use of NULL memory contexts. * * This enables tracking of the NULL memory context without enabling leak * reporting on exit. Useful for when you want to do your own leak * reporting call via talloc_report_null_full(); */ _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void); /** * @brief Disable tracking of the NULL memory context. * * This disables tracking of the NULL memory context. */ _PUBLIC_ void talloc_disable_null_tracking(void); /** * @brief Enable leak report when a program exits. * * This enables calling of talloc_report(NULL, stderr) when the program * exits. In Samba4 this is enabled by using the --leak-report command * line option. * * For it to be useful, this function must be called before any other * talloc function as it establishes a "null context" that acts as the * top of the tree. If you don't call this function first then passing * NULL to talloc_report() or talloc_report_full() won't give you the * full tree printout. 
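 *
 * A typical usage sketch (assuming you control the program's main()):
 * @code
 * int main(int argc, char *argv[])
 * {
 *      talloc_enable_leak_report();
 *      // ... the rest of the program; the report is printed at exit ...
 *      return 0;
 * }
 * @endcode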
* * Here is a typical talloc report: * * @code * talloc report on 'null_context' (total 267 bytes in 15 blocks) * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks * iconv(UTF8,CP850) contains 42 bytes in 2 blocks * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks * iconv(CP850,UTF8) contains 42 bytes in 2 blocks * iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks * iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks * @endcode */ _PUBLIC_ void talloc_enable_leak_report(void); /** * @brief Enable full leak report when a program exits. * * This enables calling of talloc_report_full(NULL, stderr) when the * program exits. In Samba4 this is enabled by using the * --leak-report-full command line option. * * For it to be useful, this function must be called before any other * talloc function as it establishes a "null context" that acts as the * top of the tree. If you don't call this function first then passing * NULL to talloc_report() or talloc_report_full() won't give you the * full tree printout. * * Here is a typical full report: * * @code * full talloc report on 'root' (total 18 bytes in 8 blocks) * p1 contains 18 bytes in 7 blocks (ref 0) * r1 contains 13 bytes in 2 blocks (ref 0) * reference to: p2 * p2 contains 1 bytes in 1 blocks (ref 1) * x3 contains 1 bytes in 1 blocks (ref 0) * x2 contains 1 bytes in 1 blocks (ref 0) * x1 contains 1 bytes in 1 blocks (ref 0) * @endcode */ _PUBLIC_ void talloc_enable_leak_report_full(void); /** * @brief Set a custom "abort" function that is called on serious error. * * The default "abort" function is abort(). * * The "abort" function is called when: * *
 *  - talloc_get_type_abort() fails
 *  - the provided pointer is not a valid talloc context
 *  - when the context meta data are invalid
 *  - when access after free is detected
* * Example: * * @code * void my_abort(const char *reason) * { * fprintf(stderr, "talloc abort: %s\n", reason); * abort(); * } * * talloc_set_abort_fn(my_abort); * @endcode * * @param[in] abort_fn The new "abort" function. * * @see talloc_set_log_fn() * @see talloc_get_type() */ _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason)); /** * @brief Set a logging function. * * @param[in] log_fn The logging function. * * @see talloc_set_log_stderr() * @see talloc_set_abort_fn() */ _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message)); /** * @brief Set stderr as the output for logs. * * @see talloc_set_log_fn() * @see talloc_set_abort_fn() */ _PUBLIC_ void talloc_set_log_stderr(void); /** * @brief Set a max memory limit for the current context hierarchy * This affects all children of this context and constrain any * allocation in the hierarchy to never exceed the limit set. * The limit can be removed by setting 0 (unlimited) as the * max_size by calling the function again on the same context. * Memory limits can also be nested, meaning a child can have * a stricter memory limit than a parent. * Memory limits are enforced only at memory allocation time. * Stealing a context into a 'limited' hierarchy properly * updates memory usage but does *not* cause failure if the * move causes the new parent to exceed its limits. However * any further allocation on that hierarchy will then fail. * * @warning talloc memlimit functionality is deprecated. Please * consider using cgroup memory limits instead. * * @param[in] ctx The talloc context to set the limit on * @param[in] max_size The (new) max_size */ _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size) _DEPRECATED_; /* @} ******************************************************************/ #if TALLOC_DEPRECATED #define talloc_zero_p(ctx, type) talloc_zero(ctx, type) #define talloc_p(ctx, type) talloc(ctx, type) #define talloc_array_p(ctx, type, count) talloc_array(ctx, type, count) #define talloc_realloc_p(ctx, p, type, count) talloc_realloc(ctx, p, type, count) #define talloc_destroy(ctx) talloc_free(ctx) #define talloc_append_string(c, s, a) (s?talloc_strdup_append(s,a):talloc_strdup(c, a)) #endif #ifndef TALLOC_MAX_DEPTH #define TALLOC_MAX_DEPTH 10000 #endif #ifdef __cplusplus } /* end of extern "C" */ #endif #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/talloc.pc.in0000660000000000000000000000043700000000000017272 0ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: talloc Description: A hierarchical pool based memory system with destructors Version: @TALLOC_VERSION@ Libs: @LIB_RPATH@ -L${libdir} -ltalloc Cflags: -I${includedir} URL: http://talloc.samba.org/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/talloc_guide.txt0000660000000000000000000007272000000000000020263 0ustar00rootroot00000000000000Using talloc in Samba4 ====================== .. contents:: Andrew Tridgell August 2009 The most current version of this document is available at http://samba.org/ftp/unpacked/talloc/talloc_guide.txt If you are used to the "old" talloc from Samba3 before 3.0.20 then please read this carefully, as talloc has changed a lot. With 3.0.20 (or 3.0.14?) the Samba4 talloc has been ported back to Samba3, so this guide applies to both. 
The new talloc is a hierarchical, reference counted memory pool system with destructors. Quite a mouthful really, but not too bad once you get used to it. Perhaps the biggest change from Samba3 is that there is no distinction between a "talloc context" and a "talloc pointer". Any pointer returned from talloc() is itself a valid talloc context. This means you can do this:: struct foo *X = talloc(mem_ctx, struct foo); X->name = talloc_strdup(X, "foo"); and the pointer X->name would be a "child" of the talloc context "X" which is itself a child of "mem_ctx". So if you do talloc_free(mem_ctx) then it is all destroyed, whereas if you do talloc_free(X) then just X and X->name are destroyed, and if you do talloc_free(X->name) then just the name element of X is destroyed. If you think about this, then what this effectively gives you is an n-ary tree, where you can free any part of the tree with talloc_free(). If you find this confusing, then I suggest you run the testsuite to watch talloc in action. You may also like to add your own tests to testsuite.c to clarify how some particular situation is handled. Performance ----------- All the additional features of talloc() over malloc() do come at a price. We have a simple performance test in Samba4 that measures talloc() versus malloc() performance, and it seems that talloc() is about 4% slower than malloc() on my x86 Debian Linux box. For Samba, the great reduction in code complexity that we get by using talloc makes this worthwhile, especially as the total overhead of talloc/malloc in Samba is already quite small. talloc API ---------- The following is a complete guide to the talloc API. Read it all at least twice. Multi-threading --------------- talloc itself does not deal with threads. It is thread-safe (assuming the underlying "malloc" is), as long as each thread uses different memory contexts. If two threads use the same context then they need to synchronize in order to be safe. In particular: - when using talloc_enable_leak_report(), giving directly NULL as a parent context implicitly refers to a hidden "null context" global variable, so this should not be used in a multi-threaded environment without proper synchronization. In threaded code turn off null tracking using talloc_disable_null_tracking(). ; - the context returned by talloc_autofree_context() is also global so shouldn't be used by several threads simultaneously without synchronization. talloc and shared objects ------------------------- talloc can be used in shared objects. Special care needs to be taken to never use talloc_autofree_context() in code that might be loaded with dlopen() and unloaded with dlclose(), as talloc_autofree_context() internally uses atexit(3). Some platforms like modern Linux handles this fine, but for example FreeBSD does not deal well with dlopen() and atexit() used simultaneously: dlclose() does not clean up the list of atexit-handlers, so when the program exits the code that was registered from within talloc_autofree_context() is gone, the program crashes at exit. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- (type *)talloc(const void *context, type); The talloc() macro is the core of the talloc library. It takes a memory context and a type, and returns a pointer to a new area of memory of the given type. The returned pointer is itself a talloc context, so you can use it as the context argument to more calls to talloc if you wish. The returned pointer is a "child" of the supplied context. 
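For example (an illustrative sketch; mem_ctx is any existing context and the
struct names are arbitrary)::

  struct foo *X = talloc(mem_ctx, struct foo);   /* X is a child of mem_ctx */
  struct bar *Y = talloc(X, struct bar);         /* X used as a context: Y is a child of X */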
This means that if you talloc_free() the context then the new child disappears as well. Alternatively you can free just the child. The context argument to talloc() can be NULL, in which case a new top level context is created. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_size(const void *context, size_t size); The function talloc_size() should be used when you don't have a convenient type to pass to talloc(). Unlike talloc(), it is not type safe (as it returns a void *), so you are on your own for type checking. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- (typeof(ptr)) talloc_ptrtype(const void *ctx, ptr); The talloc_ptrtype() macro should be used when you have a pointer and want to allocate memory to point at with this pointer. When compiling with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size() and talloc_get_name() will return the current location in the source file. and not the type. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- int talloc_free(void *ptr); The talloc_free() function frees a piece of talloc memory, and all its children. You can call talloc_free() on any pointer returned by talloc(). The return value of talloc_free() indicates success or failure, with 0 returned for success and -1 for failure. A possible failure condition is if the pointer had a destructor attached to it and the destructor returned -1. See talloc_set_destructor() for details on destructors. Likewise, if "ptr" is NULL, then the function will make no modifications and returns -1. From version 2.0 and onwards, as a special case, talloc_free() is refused on pointers that have more than one parent associated, as talloc would have no way of knowing which parent should be removed. This is different from older versions in the sense that always the reference to the most recently established parent has been destroyed. Hence to free a pointer that has more than one parent please use talloc_unlink(). To help you find problems in your code caused by this behaviour, if you do try and free a pointer with more than one parent then the talloc logging function will be called to give output like this: ERROR: talloc_free with references at some_dir/source/foo.c:123 reference at some_dir/source/other.c:325 reference at some_dir/source/third.c:121 Please see the documentation for talloc_set_log_fn() and talloc_set_log_stderr() for more information on talloc logging functions. talloc_free() operates recursively on its children. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void talloc_free_children(void *ptr); The talloc_free_children() walks along the list of all children of a talloc context and talloc_free()s only the children, not the context itself. A NULL argument is handled as no-op. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_reference(const void *context, const void *ptr); The talloc_reference() function makes "context" an additional parent of "ptr". The return value of talloc_reference() is always the original pointer "ptr", unless talloc ran out of memory in creating the reference in which case it will return NULL (each additional reference consumes around 48 bytes of memory on intel x86 platforms). If "ptr" is NULL, then the function is a no-op, and simply returns NULL. After creating a reference you can free it in one of the following ways: - you can talloc_free() any parent of the original pointer. 
That will reduce the number of parents of this pointer by 1, and will cause this pointer to be freed if it runs out of parents. - you can talloc_free() the pointer itself if it has at maximum one parent. This behaviour has been changed since the release of version 2.0. Further information in the description of "talloc_free". For more control on which parent to remove, see talloc_unlink() =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- int talloc_unlink(const void *context, void *ptr); The talloc_unlink() function removes a specific parent from ptr. The context passed must either be a context used in talloc_reference() with this pointer, or must be a direct parent of ptr. Note that if the parent has already been removed using talloc_free() then this function will fail and will return -1. Likewise, if "ptr" is NULL, then the function will make no modifications and return -1. You can just use talloc_free() instead of talloc_unlink() if there is at maximum one parent. This behaviour has been changed since the release of version 2.0. Further information in the description of "talloc_free". =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void talloc_set_destructor(const void *ptr, int (*destructor)(void *)); The function talloc_set_destructor() sets the "destructor" for the pointer "ptr". A destructor is a function that is called when the memory used by a pointer is about to be released. The destructor receives the pointer as an argument, and should return 0 for success and -1 for failure. The destructor can do anything it wants to, including freeing other pieces of memory. A common use for destructors is to clean up operating system resources (such as open file descriptors) contained in the structure the destructor is placed on. You can only place one destructor on a pointer. If you need more than one destructor then you can create a zero-length child of the pointer and place an additional destructor on that. To remove a destructor call talloc_set_destructor() with NULL for the destructor. If your destructor attempts to talloc_free() the pointer that it is the destructor for then talloc_free() will return -1 and the free will be ignored. This would be a pointless operation anyway, as the destructor is only called when the memory is just about to go away. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- int talloc_increase_ref_count(const void *ptr); The talloc_increase_ref_count(ptr) function is exactly equivalent to: talloc_reference(NULL, ptr); You can use either syntax, depending on which you think is clearer in your code. It returns 0 on success and -1 on failure. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- size_t talloc_reference_count(const void *ptr); Return the number of references to the pointer. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void talloc_set_name(const void *ptr, const char *fmt, ...); Each talloc pointer has a "name". The name is used principally for debugging purposes, although it is also possible to set and get the name on a pointer in as a way of "marking" pointers in your code. The main use for names on pointer is for "talloc reports". See talloc_report() and talloc_report_full() for details. Also see talloc_enable_leak_report() and talloc_enable_leak_report_full(). The talloc_set_name() function allocates memory as a child of the pointer. 
It is logically equivalent to: talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...)); Note that multiple calls to talloc_set_name() will allocate more memory without releasing the name. All of the memory is released when the ptr is freed using talloc_free(). =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void talloc_set_name_const(const void *ptr, const char *name); The function talloc_set_name_const() is just like talloc_set_name(), but it takes a string constant, and is much faster. It is extensively used by the "auto naming" macros, such as talloc_p(). This function does not allocate any memory. It just copies the supplied pointer into the internal representation of the talloc ptr. This means you must not pass a name pointer to memory that will disappear before the ptr is freed with talloc_free(). =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_named(const void *context, size_t size, const char *fmt, ...); The talloc_named() function creates a named talloc pointer. It is equivalent to: ptr = talloc_size(context, size); talloc_set_name(ptr, fmt, ....); =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_named_const(const void *context, size_t size, const char *name); This is equivalent to:: ptr = talloc_size(context, size); talloc_set_name_const(ptr, name); =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- const char *talloc_get_name(const void *ptr); This returns the current name for the given talloc pointer. See talloc_set_name() for details. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_init(const char *fmt, ...); This function creates a zero length named talloc context as a top level context. It is equivalent to:: talloc_named(NULL, 0, fmt, ...); =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_new(void *ctx); This is a utility macro that creates a new memory context hanging off an exiting context, automatically naming it "talloc_new: __location__" where __location__ is the source line it is called from. It is particularly useful for creating a new temporary working context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- (type *)talloc_realloc(const void *context, void *ptr, type, count); The talloc_realloc() macro changes the size of a talloc pointer. The "count" argument is the number of elements of type "type" that you want the resulting pointer to hold. talloc_realloc() has the following equivalences:: talloc_realloc(context, NULL, type, 1) ==> talloc(context, type); talloc_realloc(context, NULL, type, N) ==> talloc_array(context, type, N); talloc_realloc(context, ptr, type, 0) ==> talloc_free(ptr); The "context" argument is only used if "ptr" is NULL, otherwise it is ignored. talloc_realloc() returns the new pointer, or NULL on failure. The call will fail either due to a lack of memory, or because the pointer has more than one parent (see talloc_reference()). =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_realloc_size(const void *context, void *ptr, size_t size); the talloc_realloc_size() function is useful when the type is not known so the typesafe talloc_realloc() cannot be used. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_steal(const void *new_ctx, const void *ptr); The talloc_steal() function changes the parent context of a talloc pointer. 
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_steal(const void *new_ctx, const void *ptr);

The talloc_steal() function changes the parent context of a talloc
pointer. It is typically used when the context that the pointer is
currently a child of is going to be freed and you wish to keep the
memory for a longer time.

The talloc_steal() function returns the pointer that you pass it. It
does not have any failure modes.

NOTE: It is possible to produce loops in the parent/child
relationship if you are not careful with talloc_steal(). No
guarantees are provided as to your sanity or the safety of your data
if you do this.

talloc_steal(new_ctx, NULL) will return NULL with no side effects.

Note that if you try to call talloc_steal() on a pointer that has
more than one parent then the result is ambiguous. Talloc will choose
to remove the parent that is currently indicated by talloc_parent()
and replace it with the chosen parent.

You will also get a message like this via the talloc logging
functions:

  WARNING: talloc_steal with references at some_dir/source/foo.c:123
	reference at some_dir/source/other.c:325
	reference at some_dir/source/third.c:121

To unambiguously change the parent of a pointer please see the
function talloc_reparent(). See the talloc_set_log_fn() documentation
for more information on talloc logging.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr);

The talloc_reparent() function changes the parent context of a talloc
pointer. It is typically used when the context that the pointer is
currently a child of is going to be freed and you wish to keep the
memory for a longer time.

The talloc_reparent() function returns the pointer that you pass it.
It does not have any failure modes.

The difference between talloc_reparent() and talloc_steal() is that
talloc_reparent() can specify which parent you wish to change. This
is useful when a pointer has multiple parents via references.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_parent(const void *ptr);

The talloc_parent() function returns the current talloc parent. This
is usually the pointer under which this memory was originally
created, but it may have changed due to a talloc_steal() or
talloc_reparent().

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
size_t talloc_total_size(const void *ptr);

The talloc_total_size() function returns the total size in bytes used
by this pointer and all child pointers. Mostly useful for debugging.

Passing NULL is allowed, but it will only give a meaningful result if
talloc_enable_leak_report() or talloc_enable_leak_report_full() has
been called.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
size_t talloc_total_blocks(const void *ptr);

The talloc_total_blocks() function returns the total memory block
count used by this pointer and all child pointers. Mostly useful for
debugging.

Passing NULL is allowed, but it will only give a meaningful result if
talloc_enable_leak_report() or talloc_enable_leak_report_full() has
been called.

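A common pattern with talloc_steal() is to build a result on a
scratch context and then move just the result to the caller's
context. A minimal sketch, with the illustrative name
build_greeting():

  #include <talloc.h>

  static char *build_greeting(TALLOC_CTX *mem_ctx, const char *name)
  {
          TALLOC_CTX *tmp = talloc_new(mem_ctx);
          char *result;

          if (tmp == NULL) {
                  return NULL;
          }

          result = talloc_asprintf(tmp, "hello %s", name);
          if (result == NULL) {
                  talloc_free(tmp);
                  return NULL;
          }

          /* keep only the result: give it a longer-lived parent
             before the scratch context goes away */
          talloc_steal(mem_ctx, result);
          talloc_free(tmp);
          return result;
  }

Any other intermediate allocations made on tmp are released by the
final talloc_free(tmp); only the stolen result survives.
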
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
                            void (*callback)(const void *ptr, int depth,
                                             int max_depth, int is_ref,
                                             void *priv),
                            void *priv);

This provides a more flexible report than talloc_report(). It will
recursively call the callback for the entire tree of memory
referenced by the pointer. References in the tree are passed with
is_ref = 1 and the pointer that is referenced.

You can pass NULL for the pointer, in which case a report is printed
for the top level memory context, but only if
talloc_enable_leak_report() or talloc_enable_leak_report_full() has
been called.

The recursion is stopped when depth >= max_depth. max_depth = -1
means only stop at leaf nodes.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f);

This provides a more flexible report than talloc_report(). It will
let you specify the depth and max_depth.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_report(const void *ptr, FILE *f);

The talloc_report() function prints a summary report of all memory
used by ptr. One line of report is printed for each immediate child
of ptr, showing the total memory and number of blocks used by that
child.

You can pass NULL for the pointer, in which case a report is printed
for the top level memory context, but only if
talloc_enable_leak_report() or talloc_enable_leak_report_full() has
been called.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_report_full(const void *ptr, FILE *f);

This provides a more detailed report than talloc_report(). It will
recursively print the entire tree of memory referenced by the
pointer. References in the tree are shown by giving the name of the
pointer that is referenced.

You can pass NULL for the pointer, in which case a report is printed
for the top level memory context, but only if
talloc_enable_leak_report() or talloc_enable_leak_report_full() has
been called.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_enable_leak_report(void);

This enables calling of talloc_report(NULL, stderr) when the program
exits. In Samba4 this is enabled by using the --leak-report command
line option.

For it to be useful, this function must be called before any other
talloc function as it establishes a "null context" that acts as the
top of the tree. If you don't call this function first then passing
NULL to talloc_report() or talloc_report_full() won't give you the
full tree printout.

Here is a typical talloc report:

  talloc report on 'null_context' (total 267 bytes in 15 blocks)
	libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks
	libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks
	iconv(UTF8,CP850) contains 42 bytes in 2 blocks
	libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks
	iconv(CP850,UTF8) contains 42 bytes in 2 blocks
	iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks
	iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks

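For example, a program that wants the exit-time report described
above might be structured like this (a minimal sketch):

  #include <stdio.h>
  #include <talloc.h>

  int main(void)
  {
          void *p;

          /* must come before any other talloc call so that the
             "null context" exists and can act as the top of the tree */
          talloc_enable_leak_report();

          p = talloc_named(NULL, 100, "example leak");
          (void)p;

          /* an explicit report can also be printed at any time */
          talloc_report(NULL, stderr);

          return 0; /* the summary leak report is printed at exit */
  }
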
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_enable_leak_report_full(void);

This enables calling of talloc_report_full(NULL, stderr) when the
program exits. In Samba4 this is enabled by using the
--leak-report-full command line option.

For it to be useful, this function must be called before any other
talloc function as it establishes a "null context" that acts as the
top of the tree. If you don't call this function first then passing
NULL to talloc_report() or talloc_report_full() won't give you the
full tree printout.

Here is a typical full report:

  full talloc report on 'root' (total 18 bytes in 8 blocks)
	p1 contains 18 bytes in 7 blocks (ref 0)
	    r1 contains 13 bytes in 2 blocks (ref 0)
		reference to: p2
	    p2 contains 1 bytes in 1 blocks (ref 1)
	    x3 contains 1 bytes in 1 blocks (ref 0)
	    x2 contains 1 bytes in 1 blocks (ref 0)
	    x1 contains 1 bytes in 1 blocks (ref 0)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_enable_null_tracking(void);

This enables tracking of the NULL memory context without enabling
leak reporting on exit. Useful when you want to do your own leak
reporting call via talloc_report_null_full();

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void talloc_disable_null_tracking(void);

This disables tracking of the NULL memory context.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
(type *)talloc_zero(const void *ctx, type);

The talloc_zero() macro is equivalent to::

  ptr = talloc(ctx, type);
  if (ptr) memset(ptr, 0, sizeof(type));

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_zero_size(const void *ctx, size_t size);

The talloc_zero_size() function is useful when you don't have a known
type.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_memdup(const void *ctx, const void *p, size_t size);

The talloc_memdup() function is equivalent to::

  ptr = talloc_size(ctx, size);
  if (ptr) memcpy(ptr, p, size);

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_strdup(const void *ctx, const char *p);

The talloc_strdup() function is equivalent to::

  ptr = talloc_size(ctx, strlen(p)+1);
  if (ptr) memcpy(ptr, p, strlen(p)+1);

This function sets the name of the new pointer to the passed string.
This is equivalent to::

  talloc_set_name_const(ptr, ptr)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_strndup(const void *t, const char *p, size_t n);

The talloc_strndup() function is the talloc equivalent of the C
library function strndup().

This function sets the name of the new pointer to the passed string.
This is equivalent to:

  talloc_set_name_const(ptr, ptr)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_append_string(const void *t, char *orig, const char *append);

The talloc_append_string() function appends the string "append" to
the end of the string "orig".

This function sets the name of the new pointer to the new string.
This is equivalent to::

  talloc_set_name_const(ptr, ptr)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_vasprintf(const void *t, const char *fmt, va_list ap);

The talloc_vasprintf() function is the talloc equivalent of the C
library function vasprintf().

This function sets the name of the new pointer to the new string.
This is equivalent to::

  talloc_set_name_const(ptr, ptr)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_asprintf(const void *t, const char *fmt, ...);

The talloc_asprintf() function is the talloc equivalent of the C
library function asprintf().

This function sets the name of the new pointer to the new string.
This is equivalent to::

  talloc_set_name_const(ptr, ptr)

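As a small sketch of the string helpers above (describe_user() is an
illustrative name, not a talloc function):

  #include <talloc.h>

  static char *describe_user(TALLOC_CTX *mem_ctx, const char *user,
                             unsigned int id)
  {
          /* both results hang off mem_ctx and are named after their
             own contents, which makes talloc reports easy to read */
          char *copy = talloc_strdup(mem_ctx, user);
          char *line;

          if (copy == NULL) {
                  return NULL;
          }

          line = talloc_asprintf(mem_ctx, "user=%s id=%u", copy, id);
          talloc_free(copy); /* only "line" is handed back */
          return line;
  }
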
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_asprintf_append(char *s, const char *fmt, ...);

The talloc_asprintf_append() function appends the given formatted
string to the given string. Use this variant when the string in the
current talloc buffer may have been truncated in length.

This function sets the name of the new pointer to the new string.
This is equivalent to::

  talloc_set_name_const(ptr, ptr)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...);

The talloc_asprintf_append_buffer() function appends the given
formatted string to the end of the currently allocated talloc buffer.
Use this variant when the string in the current talloc buffer has not
been changed.

This function sets the name of the new pointer to the new string.
This is equivalent to::

  talloc_set_name_const(ptr, ptr)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
(type *)talloc_array(const void *ctx, type, unsigned int count);

The talloc_array() macro is equivalent to::

  (type *)talloc_size(ctx, sizeof(type) * count);

except that it provides integer overflow protection for the multiply,
returning NULL if the multiply overflows.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_array_size(const void *ctx, size_t size, unsigned int count);

The talloc_array_size() function is useful when the type is not
known. It operates in the same way as talloc_array(), but takes a
size instead of a type.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, unsigned int count);

The talloc_array_ptrtype() macro should be used when you have a
pointer to an array and want to allocate memory of an array to point
at with this pointer. When compiling with gcc >= 3 it is typesafe.
Note this is a wrapper of talloc_array_size() and talloc_get_name()
will return the current location in the source file, and not the
type.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_realloc_fn(const void *ctx, void *ptr, size_t size);

This is a non-macro version of talloc_realloc(), which is useful as
libraries sometimes want a realloc function pointer. A realloc()
implementation encapsulates the functionality of malloc(), free() and
realloc() in one call, which is why it is useful to be able to pass
around a single function pointer.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_autofree_context(void);

This is a handy utility function that returns a talloc context which
will be automatically freed on program exit. This can be used to
reduce the noise in memory leak reports.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
void *talloc_check_name(const void *ptr, const char *name);

This function checks if a pointer has the specified name. If it does
then the pointer is returned. If it doesn't then NULL is returned.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
(type *)talloc_get_type(const void *ptr, type);

This macro allows you to do type checking on talloc pointers. It is
particularly useful for void* private pointers. It is equivalent to
this::

  (type *)talloc_check_name(ptr, #type)

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
talloc_set_type(const void *ptr, type);

This macro allows you to force the name of a pointer to be of a
particular type. This can be used in conjunction with
talloc_get_type() to do type checking on void* pointers.
It is equivalent to this:: talloc_set_name_const(ptr, #type) =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- talloc_get_size(const void *ctx); This function lets you know the amount of memory allocated so far by this context. It does NOT account for subcontext memory. This can be used to calculate the size of an array. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void *talloc_find_parent_byname(const void *ctx, const char *name); Find a parent memory context of the current context that has the given name. This can be very useful in complex programs where it may be difficult to pass all information down to the level you need, but you know the structure you want is a parent of another context. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- (type *)talloc_find_parent_bytype(ctx, type); Like talloc_find_parent_byname() but takes a type, making it typesafe. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void talloc_set_log_fn(void (*log_fn)(const char *message)); This function sets a logging function that talloc will use for warnings and errors. By default talloc will not print any warnings or errors. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- void talloc_set_log_stderr(void) This sets the talloc log function to write log messages to stderr. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/talloc_testsuite.h0000660000000000000000000000025600000000000020622 0ustar00rootroot00000000000000#ifndef __LIB_TALLOC_TALLOC_TESTSUITE_H__ #define __LIB_TALLOC_TALLOC_TESTSUITE_H__ struct torture_context; bool torture_local_talloc(struct torture_context *tctx); #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/test_magic_differs.sh0000770000000000000000000000055100000000000021237 0ustar00rootroot00000000000000#!/bin/sh # This test ensures that two different talloc processes do not use the same # magic value to lessen the opportunity for transferrable attacks. echo "test: magic differs" helper=$1 m1=$($helper) m2=$($helper) if [ $m1 -eq $m2 ]; then echo "failure: magic remained the same between executions ($m1 vs $m2)" exit 1 fi echo "success: magic differs" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/test_magic_differs_helper.c0000660000000000000000000000052100000000000022401 0ustar00rootroot00000000000000#include #include "talloc.h" /* * This program is called by a testing shell script in order to ensure that * if the library is loaded into different processes it uses different magic * values in order to thwart security attacks. */ int main(int argc, char *argv[]) { printf("%i\n", talloc_test_get_magic()); return 0; } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1594296290.454105 tevent-0.11.0/lib/talloc/test_pytalloc.c0000660000000000000000000001505100000000000020113 0ustar00rootroot00000000000000/* Samba Unix SMB/CIFS implementation. C utilities for the pytalloc test suite. Provides the "_test_pytalloc" Python module. NOTE: Please read talloc_guide.txt for full documentation Copyright (C) Petr Viktorin 2015 ** NOTE! The following LGPL license applies to the talloc ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #include #include static PyObject *testpytalloc_new(PyTypeObject *mod, PyObject *Py_UNUSED(ignored)) { char *obj = talloc_strdup(NULL, "This is a test string");; return pytalloc_steal(pytalloc_GetObjectType(), obj); } static PyObject *testpytalloc_get_object_type(PyObject *mod, PyObject *Py_UNUSED(ignored)) { PyObject *type = (PyObject *)pytalloc_GetObjectType(); Py_INCREF(type); return type; } static PyObject *testpytalloc_base_new(PyTypeObject *mod, PyObject *Py_UNUSED(ignored)) { char *obj = talloc_strdup(NULL, "This is a test string for a BaseObject");; return pytalloc_steal(pytalloc_GetBaseObjectType(), obj); } static PyObject *testpytalloc_base_get_object_type(PyObject *mod, PyObject *Py_UNUSED(ignored)) { PyObject *type = (PyObject *)pytalloc_GetBaseObjectType(); Py_INCREF(type); return type; } static PyObject *testpytalloc_reference(PyObject *mod, PyObject *args) { PyObject *source = NULL; void *ptr; if (!PyArg_ParseTuple(args, "O!", pytalloc_GetObjectType(), &source)) return NULL; ptr = pytalloc_get_ptr(source); return pytalloc_reference_ex(pytalloc_GetObjectType(), ptr, ptr); } static PyObject *testpytalloc_base_reference(PyObject *mod, PyObject *args) { PyObject *source = NULL; void *mem_ctx; if (!PyArg_ParseTuple(args, "O!", pytalloc_GetBaseObjectType(), &source)) { return NULL; } mem_ctx = pytalloc_get_mem_ctx(source); return pytalloc_reference_ex(pytalloc_GetBaseObjectType(), mem_ctx, mem_ctx); } static PyMethodDef test_talloc_methods[] = { { "new", (PyCFunction)testpytalloc_new, METH_NOARGS, "create a talloc Object with a testing string"}, { "get_object_type", (PyCFunction)testpytalloc_get_object_type, METH_NOARGS, "call pytalloc_GetObjectType"}, { "base_new", (PyCFunction)testpytalloc_base_new, METH_NOARGS, "create a talloc BaseObject with a testing string"}, { "base_get_object_type", (PyCFunction)testpytalloc_base_get_object_type, METH_NOARGS, "call pytalloc_GetBaseObjectType"}, { "reference", (PyCFunction)testpytalloc_reference, METH_VARARGS, "call pytalloc_reference_ex"}, { "base_reference", (PyCFunction)testpytalloc_base_reference, METH_VARARGS, "call pytalloc_reference_ex"}, {0} }; static PyTypeObject DObject_Type; static int dobject_destructor(void *ptr) { PyObject *destructor_func = *talloc_get_type(ptr, PyObject*); PyObject *ret; ret = PyObject_CallObject(destructor_func, NULL); Py_DECREF(destructor_func); if (ret == NULL) { PyErr_Print(); } else { Py_DECREF(ret); } return 0; } static PyObject *dobject_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *destructor_func = NULL; PyObject **obj; if (!PyArg_ParseTuple(args, "O", &destructor_func)) return NULL; Py_INCREF(destructor_func); obj = talloc(NULL, PyObject*); *obj = destructor_func; talloc_set_destructor((void*)obj, dobject_destructor); return pytalloc_steal(&DObject_Type, obj); } static PyTypeObject DObject_Type = { .tp_name = 
"_test_pytalloc.DObject", .tp_basicsize = sizeof(pytalloc_Object), .tp_methods = NULL, .tp_new = dobject_new, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "test talloc object that calls a function when underlying data is freed\n", }; static PyTypeObject DBaseObject_Type; static int d_base_object_destructor(void *ptr) { PyObject *destructor_func = *talloc_get_type(ptr, PyObject*); PyObject *ret; ret = PyObject_CallObject(destructor_func, NULL); Py_DECREF(destructor_func); if (ret == NULL) { PyErr_Print(); } else { Py_DECREF(ret); } return 0; } static PyObject *d_base_object_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *destructor_func = NULL; PyObject **obj; if (!PyArg_ParseTuple(args, "O", &destructor_func)) return NULL; Py_INCREF(destructor_func); obj = talloc(NULL, PyObject*); *obj = destructor_func; talloc_set_destructor((void*)obj, d_base_object_destructor); return pytalloc_steal(&DBaseObject_Type, obj); } static PyTypeObject DBaseObject_Type = { .tp_name = "_test_pytalloc.DBaseObject", .tp_methods = NULL, .tp_new = d_base_object_new, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_doc = "test talloc object that calls a function when underlying data is freed\n", }; #define MODULE_DOC PyDoc_STR("Test utility module for pytalloc") #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, .m_name = "_test_pytalloc", .m_doc = PyDoc_STR("Test utility module for pytalloc"), .m_size = -1, .m_methods = test_talloc_methods, }; #endif static PyObject *module_init(void); static PyObject *module_init(void) { PyObject *m; DObject_Type.tp_base = pytalloc_GetObjectType(); if (PyType_Ready(&DObject_Type) < 0) { return NULL; } DBaseObject_Type.tp_basicsize = pytalloc_BaseObject_size(); DBaseObject_Type.tp_base = pytalloc_GetBaseObjectType(); if (PyType_Ready(&DBaseObject_Type) < 0) { return NULL; } #if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&moduledef); #else m = Py_InitModule3("_test_pytalloc", test_talloc_methods, MODULE_DOC); #endif if (m == NULL) { return NULL; } Py_INCREF(&DObject_Type); Py_INCREF(DObject_Type.tp_base); PyModule_AddObject(m, "DObject", (PyObject *)&DObject_Type); Py_INCREF(&DBaseObject_Type); Py_INCREF(DBaseObject_Type.tp_base); PyModule_AddObject(m, "DBaseObject", (PyObject *)&DBaseObject_Type); return m; } #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit__test_pytalloc(void); PyMODINIT_FUNC PyInit__test_pytalloc(void) { return module_init(); } #else void init_test_pytalloc(void); void init_test_pytalloc(void) { module_init(); } #endif ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/test_pytalloc.py0000660000000000000000000001377200000000000020331 0ustar00rootroot00000000000000#!/usr/bin/env python3 # Simple tests for the talloc python bindings. 
# Copyright (C) 2015 Petr Viktorin import unittest import subprocess import sys import gc import talloc import _test_pytalloc def dummy_func(): pass class TallocTests(unittest.TestCase): def test_report_full(self): # report_full is hardcoded to print to stdout, so use a subprocess process = subprocess.Popen([ sys.executable, '-c', """if True: import talloc, _test_pytalloc obj = _test_pytalloc.new() talloc.report_full(obj) """ ], stdout=subprocess.PIPE) output, stderr = process.communicate() output = str(output) self.assertTrue("full talloc report on 'talloc.Object" in output) self.assertTrue("This is a test string" in output) def test_totalblocks(self): obj = _test_pytalloc.new() # Two blocks: the string, and the name self.assertEqual(talloc.total_blocks(obj), 2) def test_repr(self): obj = _test_pytalloc.new() prefix = '= obj1) self.assertFalse(obj1 > obj1) def test_compare_different(self): # object comparison is consistent obj1, obj2 = sorted([ _test_pytalloc.new(), _test_pytalloc.new()]) self.assertFalse(obj1 == obj2) self.assertTrue(obj1 != obj2) self.assertTrue(obj1 <= obj2) self.assertTrue(obj1 < obj2) self.assertFalse(obj1 >= obj2) self.assertFalse(obj1 > obj2) def test_compare_different_types(self): # object comparison falls back to comparing types if sys.version_info >= (3, 0): # In Python 3, types are unorderable -- nothing to test return if talloc.Object < _test_pytalloc.DObject: obj1 = _test_pytalloc.new() obj2 = _test_pytalloc.DObject(dummy_func) else: obj2 = _test_pytalloc.new() obj1 = _test_pytalloc.DObject(dummy_func) self.assertFalse(obj1 == obj2) self.assertTrue(obj1 != obj2) self.assertTrue(obj1 <= obj2) self.assertTrue(obj1 < obj2) self.assertFalse(obj1 >= obj2) self.assertFalse(obj1 > obj2) class TallocBaseComparisonTests(unittest.TestCase): def test_compare_same(self): obj1 = _test_pytalloc.base_new() self.assertTrue(obj1 == obj1) self.assertFalse(obj1 != obj1) self.assertTrue(obj1 <= obj1) self.assertFalse(obj1 < obj1) self.assertTrue(obj1 >= obj1) self.assertFalse(obj1 > obj1) def test_compare_different(self): # object comparison is consistent obj1, obj2 = sorted([ _test_pytalloc.base_new(), _test_pytalloc.base_new()]) self.assertFalse(obj1 == obj2) self.assertTrue(obj1 != obj2) self.assertTrue(obj1 <= obj2) self.assertTrue(obj1 < obj2) self.assertFalse(obj1 >= obj2) self.assertFalse(obj1 > obj2) def test_compare_different_types(self): # object comparison falls back to comparing types if sys.version_info >= (3, 0): # In Python 3, types are unorderable -- nothing to test return if talloc.BaseObject < _test_pytalloc.DBaseObject: obj1 = _test_pytalloc.base_new() obj2 = _test_pytalloc.DBaseObject(dummy_func) else: obj2 = _test_pytalloc.base_new() obj1 = _test_pytalloc.DBaseObject(dummy_func) self.assertFalse(obj1 == obj2) self.assertTrue(obj1 != obj2) self.assertTrue(obj1 <= obj2) self.assertTrue(obj1 < obj2) self.assertFalse(obj1 >= obj2) self.assertFalse(obj1 > obj2) class TallocUtilTests(unittest.TestCase): def test_get_type(self): self.assertTrue(talloc.Object is _test_pytalloc.get_object_type()) def test_reference(self): # Check correct lifetime of the talloc'd data with multiple references lst = [] obj = _test_pytalloc.DObject(lambda: lst.append('dead')) ref = _test_pytalloc.reference(obj) del obj gc.collect() self.assertEqual(lst, []) del ref gc.collect() self.assertEqual(lst, ['dead']) def test_get_base_type(self): self.assertTrue(talloc.BaseObject is _test_pytalloc.base_get_object_type()) def test_base_reference(self): # Check correct lifetime of the talloc'd 
data with multiple references lst = [] obj = _test_pytalloc.DBaseObject(lambda: lst.append('dead')) ref = _test_pytalloc.base_reference(obj) del obj gc.collect() self.assertEqual(lst, []) del ref gc.collect() self.assertEqual(lst, ['dead']) if __name__ == '__main__': unittest.TestProgram() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/talloc/testsuite.c0000660000000000000000000015656500000000000017276 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. local testing of talloc routines. Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/time.h" #include #ifdef HAVE_PTHREAD #include #endif #include #include #ifdef NDEBUG #undef NDEBUG #endif #include #include "talloc_testsuite.h" static struct timeval private_timeval_current(void) { struct timeval tv; gettimeofday(&tv, NULL); return tv; } static double private_timeval_elapsed(struct timeval *tv) { struct timeval tv2 = private_timeval_current(); return (tv2.tv_sec - tv->tv_sec) + (tv2.tv_usec - tv->tv_usec)*1.0e-6; } #define torture_assert(test, expr, str) if (!(expr)) { \ printf("failure: %s [\n%s: Expression %s failed: %s\n]\n", \ test, __location__, #expr, str); \ return false; \ } #define torture_assert_str_equal(test, arg1, arg2, desc) \ if (arg1 == NULL && arg2 == NULL) { /* OK, both NULL == equal */ \ } else if (arg1 == NULL || arg2 == NULL) { \ return false; \ } else if (strcmp(arg1, arg2)) { \ printf("failure: %s [\n%s: Expected %s, got %s: %s\n]\n", \ test, __location__, arg1, arg2, desc); \ return false; \ } #define CHECK_SIZE(test, ptr, tsize) do { \ if (talloc_total_size(ptr) != (tsize)) { \ printf("failed: %s [\n%s: wrong '%s' tree size: got %u expected %u\n]\n", \ test, __location__, #ptr, \ (unsigned)talloc_total_size(ptr), \ (unsigned)tsize); \ talloc_report_full(ptr, stdout); \ return false; \ } \ } while (0) #define CHECK_BLOCKS(test, ptr, tblocks) do { \ if (talloc_total_blocks(ptr) != (tblocks)) { \ printf("failed: %s [\n%s: wrong '%s' tree blocks: got %u expected %u\n]\n", \ test, __location__, #ptr, \ (unsigned)talloc_total_blocks(ptr), \ (unsigned)tblocks); \ talloc_report_full(ptr, stdout); \ return false; \ } \ } while (0) #define CHECK_PARENT(test, ptr, parent) do { \ if (talloc_parent(ptr) != (parent)) { \ printf("failed: %s [\n%s: '%s' has wrong parent: got %p expected %p\n]\n", \ test, __location__, #ptr, \ talloc_parent(ptr), \ (parent)); \ talloc_report_full(ptr, stdout); \ talloc_report_full(parent, stdout); \ talloc_report_full(NULL, stdout); \ return false; \ } \ } while (0) static unsigned int test_abort_count; #if 0 static void test_abort_fn(const char *reason) { printf("# test_abort_fn(%s)\n", reason); test_abort_count++; } static void test_abort_start(void) { 
test_abort_count = 0; talloc_set_abort_fn(test_abort_fn); } #endif static void test_abort_stop(void) { test_abort_count = 0; talloc_set_abort_fn(NULL); } static void test_log_stdout(const char *message) { fprintf(stdout, "%s", message); } /* test references */ static bool test_ref1(void) { void *root, *p1, *p2, *ref, *r1; printf("test: ref1\n# SINGLE REFERENCE FREE\n"); root = talloc_named_const(NULL, 0, "root"); p1 = talloc_named_const(root, 1, "p1"); p2 = talloc_named_const(p1, 1, "p2"); talloc_named_const(p1, 1, "x1"); talloc_named_const(p1, 2, "x2"); talloc_named_const(p1, 3, "x3"); r1 = talloc_named_const(root, 1, "r1"); ref = talloc_reference(r1, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("ref1", p1, 5); CHECK_BLOCKS("ref1", p2, 1); CHECK_BLOCKS("ref1", ref, 1); CHECK_BLOCKS("ref1", r1, 2); fprintf(stderr, "Freeing p2\n"); talloc_unlink(r1, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("ref1", p1, 5); CHECK_BLOCKS("ref1", p2, 1); CHECK_BLOCKS("ref1", r1, 1); fprintf(stderr, "Freeing p1\n"); talloc_free(p1); talloc_report_full(root, stderr); CHECK_BLOCKS("ref1", r1, 1); fprintf(stderr, "Freeing r1\n"); talloc_free(r1); talloc_report_full(NULL, stderr); fprintf(stderr, "Testing NULL\n"); if (talloc_reference(root, NULL)) { return false; } CHECK_BLOCKS("ref1", root, 1); CHECK_SIZE("ref1", root, 0); talloc_free(root); printf("success: ref1\n"); return true; } /* test references */ static bool test_ref2(void) { void *root, *p1, *p2, *ref, *r1; printf("test: ref2\n# DOUBLE REFERENCE FREE\n"); root = talloc_named_const(NULL, 0, "root"); p1 = talloc_named_const(root, 1, "p1"); talloc_named_const(p1, 1, "x1"); talloc_named_const(p1, 1, "x2"); talloc_named_const(p1, 1, "x3"); p2 = talloc_named_const(p1, 1, "p2"); r1 = talloc_named_const(root, 1, "r1"); ref = talloc_reference(r1, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("ref2", p1, 5); CHECK_BLOCKS("ref2", p2, 1); CHECK_BLOCKS("ref2", r1, 2); fprintf(stderr, "Freeing ref\n"); talloc_unlink(r1, ref); talloc_report_full(root, stderr); CHECK_BLOCKS("ref2", p1, 5); CHECK_BLOCKS("ref2", p2, 1); CHECK_BLOCKS("ref2", r1, 1); fprintf(stderr, "Freeing p2\n"); talloc_free(p2); talloc_report_full(root, stderr); CHECK_BLOCKS("ref2", p1, 4); CHECK_BLOCKS("ref2", r1, 1); fprintf(stderr, "Freeing p1\n"); talloc_free(p1); talloc_report_full(root, stderr); CHECK_BLOCKS("ref2", r1, 1); fprintf(stderr, "Freeing r1\n"); talloc_free(r1); talloc_report_full(root, stderr); CHECK_SIZE("ref2", root, 0); talloc_free(root); printf("success: ref2\n"); return true; } /* test references */ static bool test_ref3(void) { void *root, *p1, *p2, *ref, *r1; printf("test: ref3\n# PARENT REFERENCE FREE\n"); root = talloc_named_const(NULL, 0, "root"); p1 = talloc_named_const(root, 1, "p1"); p2 = talloc_named_const(root, 1, "p2"); r1 = talloc_named_const(p1, 1, "r1"); ref = talloc_reference(p2, r1); talloc_report_full(root, stderr); CHECK_BLOCKS("ref3", p1, 2); CHECK_BLOCKS("ref3", p2, 2); CHECK_BLOCKS("ref3", r1, 1); CHECK_BLOCKS("ref3", ref, 1); fprintf(stderr, "Freeing p1\n"); talloc_free(p1); talloc_report_full(root, stderr); CHECK_BLOCKS("ref3", p2, 2); CHECK_BLOCKS("ref3", r1, 1); fprintf(stderr, "Freeing p2\n"); talloc_free(p2); talloc_report_full(root, stderr); CHECK_SIZE("ref3", root, 0); talloc_free(root); printf("success: ref3\n"); return true; } /* test references */ static bool test_ref4(void) { void *root, *p1, *p2, *ref, *r1; printf("test: ref4\n# REFERRER REFERENCE FREE\n"); root = talloc_named_const(NULL, 0, "root"); p1 = 
talloc_named_const(root, 1, "p1"); talloc_named_const(p1, 1, "x1"); talloc_named_const(p1, 1, "x2"); talloc_named_const(p1, 1, "x3"); p2 = talloc_named_const(p1, 1, "p2"); r1 = talloc_named_const(root, 1, "r1"); ref = talloc_reference(r1, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("ref4", p1, 5); CHECK_BLOCKS("ref4", p2, 1); CHECK_BLOCKS("ref4", ref, 1); CHECK_BLOCKS("ref4", r1, 2); fprintf(stderr, "Freeing r1\n"); talloc_free(r1); talloc_report_full(root, stderr); CHECK_BLOCKS("ref4", p1, 5); CHECK_BLOCKS("ref4", p2, 1); fprintf(stderr, "Freeing p2\n"); talloc_free(p2); talloc_report_full(root, stderr); CHECK_BLOCKS("ref4", p1, 4); fprintf(stderr, "Freeing p1\n"); talloc_free(p1); talloc_report_full(root, stderr); CHECK_SIZE("ref4", root, 0); talloc_free(root); printf("success: ref4\n"); return true; } /* test references */ static bool test_unlink1(void) { void *root, *p1, *p2, *ref, *r1; printf("test: unlink\n# UNLINK\n"); root = talloc_named_const(NULL, 0, "root"); p1 = talloc_named_const(root, 1, "p1"); talloc_named_const(p1, 1, "x1"); talloc_named_const(p1, 1, "x2"); talloc_named_const(p1, 1, "x3"); p2 = talloc_named_const(p1, 1, "p2"); r1 = talloc_named_const(p1, 1, "r1"); ref = talloc_reference(r1, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("unlink", p1, 7); CHECK_BLOCKS("unlink", p2, 1); CHECK_BLOCKS("unlink", ref, 1); CHECK_BLOCKS("unlink", r1, 2); fprintf(stderr, "Unreferencing r1\n"); talloc_unlink(r1, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("unlink", p1, 6); CHECK_BLOCKS("unlink", p2, 1); CHECK_BLOCKS("unlink", r1, 1); fprintf(stderr, "Freeing p1\n"); talloc_free(p1); talloc_report_full(root, stderr); CHECK_SIZE("unlink", root, 0); talloc_free(root); printf("success: unlink\n"); return true; } static int fail_destructor(void *ptr) { return -1; } /* miscellaneous tests to try to get a higher test coverage percentage */ static bool test_misc(void) { void *root, *p1; char *p2; double *d; const char *name; printf("test: misc\n# MISCELLANEOUS\n"); root = talloc_new(NULL); p1 = talloc_size(root, 0x7fffffff); torture_assert("misc", !p1, "failed: large talloc allowed\n"); p1 = talloc_strdup(root, "foo"); talloc_increase_ref_count(p1); talloc_increase_ref_count(p1); talloc_increase_ref_count(p1); CHECK_BLOCKS("misc", p1, 1); CHECK_BLOCKS("misc", root, 2); talloc_unlink(NULL, p1); CHECK_BLOCKS("misc", p1, 1); CHECK_BLOCKS("misc", root, 2); talloc_unlink(NULL, p1); CHECK_BLOCKS("misc", p1, 1); CHECK_BLOCKS("misc", root, 2); p2 = talloc_strdup(p1, "foo"); torture_assert("misc", talloc_unlink(root, p2) == -1, "failed: talloc_unlink() of non-reference context should return -1\n"); torture_assert("misc", talloc_unlink(p1, p2) == 0, "failed: talloc_unlink() of parent should succeed\n"); talloc_unlink(NULL, p1); CHECK_BLOCKS("misc", p1, 1); CHECK_BLOCKS("misc", root, 2); name = talloc_set_name(p1, "my name is %s", "foo"); torture_assert_str_equal("misc", talloc_get_name(p1), "my name is foo", "failed: wrong name after talloc_set_name(my name is foo)"); torture_assert_str_equal("misc", talloc_get_name(p1), name, "failed: wrong name after talloc_set_name(my name is foo)"); CHECK_BLOCKS("misc", p1, 2); CHECK_BLOCKS("misc", root, 3); talloc_set_name_const(p1, NULL); torture_assert_str_equal ("misc", talloc_get_name(p1), "UNNAMED", "failed: wrong name after talloc_set_name(NULL)"); CHECK_BLOCKS("misc", p1, 2); CHECK_BLOCKS("misc", root, 3); torture_assert("misc", talloc_free(NULL) == -1, "talloc_free(NULL) should give -1\n"); talloc_set_destructor(p1, fail_destructor); 
torture_assert("misc", talloc_free(p1) == -1, "Failed destructor should cause talloc_free to fail\n"); talloc_set_destructor(p1, NULL); talloc_report(root, stderr); p2 = (char *)talloc_zero_size(p1, 20); torture_assert("misc", p2[19] == 0, "Failed to give zero memory\n"); talloc_free(p2); torture_assert("misc", talloc_strdup(root, NULL) == NULL, "failed: strdup on NULL should give NULL\n"); p2 = talloc_strndup(p1, "foo", 2); torture_assert("misc", strcmp("fo", p2) == 0, "strndup doesn't work\n"); p2 = talloc_asprintf_append_buffer(p2, "o%c", 'd'); torture_assert("misc", strcmp("food", p2) == 0, "talloc_asprintf_append_buffer doesn't work\n"); CHECK_BLOCKS("misc", p2, 1); CHECK_BLOCKS("misc", p1, 3); p2 = talloc_asprintf_append_buffer(NULL, "hello %s", "world"); torture_assert("misc", strcmp("hello world", p2) == 0, "talloc_asprintf_append_buffer doesn't work\n"); CHECK_BLOCKS("misc", p2, 1); CHECK_BLOCKS("misc", p1, 3); talloc_free(p2); d = talloc_array(p1, double, 0x20000000); torture_assert("misc", !d, "failed: integer overflow not detected\n"); d = talloc_realloc(p1, d, double, 0x20000000); torture_assert("misc", !d, "failed: integer overflow not detected\n"); talloc_free(p1); CHECK_BLOCKS("misc", root, 1); p1 = talloc_named(root, 100, "%d bytes", 100); CHECK_BLOCKS("misc", p1, 2); CHECK_BLOCKS("misc", root, 3); talloc_unlink(root, p1); p1 = talloc_init("%d bytes", 200); p2 = talloc_asprintf(p1, "my test '%s'", "string"); torture_assert_str_equal("misc", p2, "my test 'string'", "failed: talloc_asprintf(\"my test '%%s'\", \"string\") gave: \"%s\""); CHECK_BLOCKS("misc", p1, 3); CHECK_SIZE("misc", p2, 17); CHECK_BLOCKS("misc", root, 1); talloc_unlink(NULL, p1); p1 = talloc_named_const(root, 10, "p1"); p2 = (char *)talloc_named_const(root, 20, "p2"); (void)talloc_reference(p1, p2); talloc_report_full(root, stderr); talloc_unlink(root, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("misc", p2, 1); CHECK_BLOCKS("misc", p1, 2); CHECK_BLOCKS("misc", root, 3); talloc_unlink(p1, p2); talloc_unlink(root, p1); p1 = talloc_named_const(root, 10, "p1"); p2 = (char *)talloc_named_const(root, 20, "p2"); (void)talloc_reference(NULL, p2); talloc_report_full(root, stderr); talloc_unlink(root, p2); talloc_report_full(root, stderr); CHECK_BLOCKS("misc", p2, 1); CHECK_BLOCKS("misc", p1, 1); CHECK_BLOCKS("misc", root, 2); talloc_unlink(NULL, p2); talloc_unlink(root, p1); /* Test that talloc_unlink is a no-op */ torture_assert("misc", talloc_unlink(root, NULL) == -1, "failed: talloc_unlink(root, NULL) == -1\n"); talloc_report(root, stderr); talloc_report(NULL, stderr); CHECK_SIZE("misc", root, 0); talloc_free(root); CHECK_SIZE("misc", NULL, 0); talloc_enable_null_tracking_no_autofree(); talloc_enable_leak_report(); talloc_enable_leak_report_full(); printf("success: misc\n"); return true; } /* test realloc */ static bool test_realloc(void) { void *root, *p1, *p2; printf("test: realloc\n# REALLOC\n"); root = talloc_new(NULL); p1 = talloc_size(root, 10); CHECK_SIZE("realloc", p1, 10); p1 = talloc_realloc_size(NULL, p1, 20); CHECK_SIZE("realloc", p1, 20); talloc_new(p1); p2 = talloc_realloc_size(p1, NULL, 30); talloc_new(p1); p2 = talloc_realloc_size(p1, p2, 40); CHECK_SIZE("realloc", p2, 40); CHECK_SIZE("realloc", root, 60); CHECK_BLOCKS("realloc", p1, 4); p1 = talloc_realloc_size(NULL, p1, 20); CHECK_SIZE("realloc", p1, 60); talloc_increase_ref_count(p2); torture_assert("realloc", talloc_realloc_size(NULL, p2, 5) == NULL, "failed: talloc_realloc() on a referenced pointer should fail\n"); 
CHECK_BLOCKS("realloc", p1, 4); talloc_realloc_size(NULL, p2, 0); talloc_realloc_size(NULL, p2, 0); CHECK_BLOCKS("realloc", p1, 4); talloc_realloc_size(p1, p2, 0); CHECK_BLOCKS("realloc", p1, 3); torture_assert("realloc", talloc_realloc_size(NULL, p1, 0x7fffffff) == NULL, "failed: oversize talloc should fail\n"); talloc_realloc_size(NULL, p1, 0); CHECK_BLOCKS("realloc", root, 4); talloc_realloc_size(root, p1, 0); CHECK_BLOCKS("realloc", root, 1); CHECK_SIZE("realloc", root, 0); talloc_free(root); printf("success: realloc\n"); return true; } /* test realloc with a child */ static bool test_realloc_child(void) { void *root; struct el2 { const char *name; } *el2, *el2_2, *el2_3, **el_list_save; struct el1 { int count; struct el2 **list, **list2, **list3; } *el1; printf("test: REALLOC WITH CHILD\n"); root = talloc_new(NULL); el1 = talloc(root, struct el1); el1->list = talloc(el1, struct el2 *); el1->list[0] = talloc(el1->list, struct el2); el1->list[0]->name = talloc_strdup(el1->list[0], "testing"); el1->list2 = talloc(el1, struct el2 *); el1->list2[0] = talloc(el1->list2, struct el2); el1->list2[0]->name = talloc_strdup(el1->list2[0], "testing2"); el1->list3 = talloc(el1, struct el2 *); el1->list3[0] = talloc(el1->list3, struct el2); el1->list3[0]->name = talloc_strdup(el1->list3[0], "testing2"); el2 = talloc(el1->list, struct el2); CHECK_PARENT("el2", el2, el1->list); el2_2 = talloc(el1->list2, struct el2); CHECK_PARENT("el2", el2_2, el1->list2); el2_3 = talloc(el1->list3, struct el2); CHECK_PARENT("el2", el2_3, el1->list3); el_list_save = el1->list; el1->list = talloc_realloc(el1, el1->list, struct el2 *, 100); if (el1->list == el_list_save) { printf("failure: talloc_realloc didn't move pointer"); return false; } CHECK_PARENT("el1_after_realloc", el1->list, el1); el1->list2 = talloc_realloc(el1, el1->list2, struct el2 *, 200); CHECK_PARENT("el1_after_realloc", el1->list2, el1); el1->list3 = talloc_realloc(el1, el1->list3, struct el2 *, 300); CHECK_PARENT("el1_after_realloc", el1->list3, el1); CHECK_PARENT("el2", el2, el1->list); CHECK_PARENT("el2", el2_2, el1->list2); CHECK_PARENT("el2", el2_3, el1->list3); /* Finally check realloc with multiple children */ el1 = talloc_realloc(root, el1, struct el1, 100); CHECK_PARENT("el1->list", el1->list, el1); CHECK_PARENT("el1->list2", el1->list2, el1); CHECK_PARENT("el1->list3", el1->list3, el1); talloc_free(root); printf("success: REALLOC WITH CHILD\n"); return true; } /* test type checking */ static bool test_type(void) { void *root; struct el1 { int count; }; struct el2 { int count; }; struct el1 *el1; printf("test: type\n# talloc type checking\n"); root = talloc_new(NULL); el1 = talloc(root, struct el1); el1->count = 1; torture_assert("type", talloc_get_type(el1, struct el1) == el1, "type check failed on el1\n"); torture_assert("type", talloc_get_type(el1, struct el2) == NULL, "type check failed on el1 with el2\n"); talloc_set_type(el1, struct el2); torture_assert("type", talloc_get_type(el1, struct el2) == (struct el2 *)el1, "type set failed on el1 with el2\n"); talloc_free(root); printf("success: type\n"); return true; } /* test steal */ static bool test_steal(void) { void *root, *p1, *p2; printf("test: steal\n# STEAL\n"); root = talloc_new(NULL); p1 = talloc_array(root, char, 10); CHECK_SIZE("steal", p1, 10); p2 = talloc_realloc(root, NULL, char, 20); CHECK_SIZE("steal", p1, 10); CHECK_SIZE("steal", root, 30); torture_assert("steal", talloc_steal(p1, NULL) == NULL, "failed: stealing NULL should give NULL\n"); torture_assert("steal", 
talloc_steal(p1, p1) == p1, "failed: stealing to ourselves is a nop\n"); CHECK_BLOCKS("steal", root, 3); CHECK_SIZE("steal", root, 30); talloc_steal(NULL, p1); talloc_steal(NULL, p2); CHECK_BLOCKS("steal", root, 1); CHECK_SIZE("steal", root, 0); talloc_free(p1); talloc_steal(root, p2); CHECK_BLOCKS("steal", root, 2); CHECK_SIZE("steal", root, 20); talloc_free(p2); CHECK_BLOCKS("steal", root, 1); CHECK_SIZE("steal", root, 0); talloc_free(root); p1 = talloc_size(NULL, 3); talloc_report_full(NULL, stderr); CHECK_SIZE("steal", NULL, 3); talloc_free(p1); printf("success: steal\n"); return true; } /* test move */ static bool test_move(void) { void *root; struct t_move { char *p; int *x; } *t1, *t2; printf("test: move\n# MOVE\n"); root = talloc_new(NULL); t1 = talloc(root, struct t_move); t2 = talloc(root, struct t_move); t1->p = talloc_strdup(t1, "foo"); t1->x = talloc(t1, int); *t1->x = 42; t2->p = talloc_move(t2, &t1->p); t2->x = talloc_move(t2, &t1->x); torture_assert("move", t1->p == NULL && t1->x == NULL && strcmp(t2->p, "foo") == 0 && *t2->x == 42, "talloc move failed"); talloc_free(root); printf("success: move\n"); return true; } /* test talloc_realloc_fn */ static bool test_realloc_fn(void) { void *root, *p1; printf("test: realloc_fn\n# talloc_realloc_fn\n"); root = talloc_new(NULL); p1 = talloc_realloc_fn(root, NULL, 10); CHECK_BLOCKS("realloc_fn", root, 2); CHECK_SIZE("realloc_fn", root, 10); p1 = talloc_realloc_fn(root, p1, 20); CHECK_BLOCKS("realloc_fn", root, 2); CHECK_SIZE("realloc_fn", root, 20); p1 = talloc_realloc_fn(root, p1, 0); CHECK_BLOCKS("realloc_fn", root, 1); CHECK_SIZE("realloc_fn", root, 0); talloc_free(root); printf("success: realloc_fn\n"); return true; } static bool test_unref_reparent(void) { void *root, *p1, *p2, *c1; printf("test: unref_reparent\n# UNREFERENCE AFTER PARENT FREED\n"); root = talloc_named_const(NULL, 0, "root"); p1 = talloc_named_const(root, 1, "orig parent"); p2 = talloc_named_const(root, 1, "parent by reference"); c1 = talloc_named_const(p1, 1, "child"); talloc_reference(p2, c1); CHECK_PARENT("unref_reparent", c1, p1); talloc_free(p1); CHECK_PARENT("unref_reparent", c1, p2); talloc_unlink(p2, c1); CHECK_SIZE("unref_reparent", root, 1); talloc_free(p2); talloc_free(root); printf("success: unref_reparent\n"); return true; } /* measure the speed of talloc versus malloc */ static bool test_speed(void) { void *ctx = talloc_new(NULL); unsigned count; const int loop = 1000; int i; struct timeval tv; printf("test: speed\n# TALLOC VS MALLOC SPEED\n"); tv = private_timeval_current(); count = 0; do { void *p1, *p2, *p3; for (i=0;ireq2 = talloc_strdup(req1, "req2"); talloc_set_destructor(req1->req2, test_loop_destructor); req1->req3 = talloc_strdup(req1, "req3"); (void)talloc_reference(req1->req3, req1); talloc_report_full(top, stderr); talloc_free(parent); talloc_report_full(top, stderr); talloc_report_full(NULL, stderr); talloc_free(top); torture_assert("loop", loop_destructor_count == 1, "FAILED TO FIRE LOOP DESTRUCTOR\n"); loop_destructor_count = 0; printf("success: loop\n"); return true; } static int realloc_parent_destructor_count; static int test_realloc_parent_destructor(char *ptr) { realloc_parent_destructor_count++; return 0; } static bool test_realloc_on_destructor_parent(void) { void *top = talloc_new(NULL); char *parent; char *a, *b, *C, *D; realloc_parent_destructor_count = 0; printf("test: free_for_exit\n# TALLOC FREE FOR EXIT\n"); parent = talloc_strdup(top, "parent"); a = talloc_strdup(parent, "a"); b = talloc_strdup(a, "b"); C = 
talloc_strdup(a, "C"); D = talloc_strdup(b, "D"); talloc_set_destructor(D, test_realloc_parent_destructor); /* Capitalised ones have destructors. * * parent --> a -> b -> D * -> c */ a = talloc_realloc(parent, a, char, 2048); torture_assert("check talloc_realloc", a != NULL, "talloc_realloc failed"); talloc_set_destructor(C, test_realloc_parent_destructor); /* * parent --> a[2048] -> b -> D * -> C * */ talloc_free(parent); torture_assert("check destructor realloc_parent_destructor", realloc_parent_destructor_count == 2, "FAILED TO FIRE free_for_exit_destructor\n"); printf("success: free_for_exit\n"); talloc_free(top); /* make ASAN happy */ return true; } static int fail_destructor_str(char *ptr) { return -1; } static bool test_free_parent_deny_child(void) { void *top = talloc_new(NULL); char *level1; char *level2; char *level3; printf("test: free_parent_deny_child\n# TALLOC FREE PARENT DENY CHILD\n"); level1 = talloc_strdup(top, "level1"); level2 = talloc_strdup(level1, "level2"); level3 = talloc_strdup(level2, "level3"); talloc_set_destructor(level3, fail_destructor_str); talloc_free(level1); talloc_set_destructor(level3, NULL); CHECK_PARENT("free_parent_deny_child", level3, top); talloc_free(top); printf("success: free_parent_deny_child\n"); return true; } struct new_parent { void *new_parent; char val[20]; }; static int reparenting_destructor(struct new_parent *np) { talloc_set_destructor(np, NULL); (void)talloc_move(np->new_parent, &np); return -1; } static bool test_free_parent_reparent_child(void) { void *top = talloc_new(NULL); char *level1; char *alternate_level1; char *level2; struct new_parent *level3; printf("test: free_parent_reparent_child\n# " "TALLOC FREE PARENT REPARENT CHILD\n"); level1 = talloc_strdup(top, "level1"); alternate_level1 = talloc_strdup(top, "alternate_level1"); level2 = talloc_strdup(level1, "level2"); level3 = talloc(level2, struct new_parent); level3->new_parent = alternate_level1; memset(level3->val, 'x', sizeof(level3->val)); talloc_set_destructor(level3, reparenting_destructor); talloc_free(level1); CHECK_PARENT("free_parent_reparent_child", level3, alternate_level1); talloc_free(top); printf("success: free_parent_reparent_child\n"); return true; } static bool test_free_parent_reparent_child_in_pool(void) { void *top = talloc_new(NULL); char *level1; char *alternate_level1; char *level2; void *pool; struct new_parent *level3; printf("test: free_parent_reparent_child_in_pool\n# " "TALLOC FREE PARENT REPARENT CHILD IN POOL\n"); pool = talloc_pool(top, 1024); level1 = talloc_strdup(pool, "level1"); alternate_level1 = talloc_strdup(top, "alternate_level1"); level2 = talloc_strdup(level1, "level2"); level3 = talloc(level2, struct new_parent); level3->new_parent = alternate_level1; memset(level3->val, 'x', sizeof(level3->val)); talloc_set_destructor(level3, reparenting_destructor); talloc_free(level1); talloc_set_destructor(level3, NULL); CHECK_PARENT("free_parent_reparent_child_in_pool", level3, alternate_level1); /* Even freeing alternate_level1 should leave pool alone. 
*/ talloc_free(alternate_level1); talloc_free(top); printf("success: free_parent_reparent_child_in_pool\n"); return true; } static bool test_talloc_ptrtype(void) { void *top = talloc_new(NULL); struct struct1 { int foo; int bar; } *s1, *s2, **s3, ***s4; const char *location1; const char *location2; const char *location3; const char *location4; printf("test: ptrtype\n# TALLOC PTRTYPE\n"); s1 = talloc_ptrtype(top, s1);location1 = __location__; if (talloc_get_size(s1) != sizeof(struct struct1)) { printf("failure: ptrtype [\n" "talloc_ptrtype() allocated the wrong size %lu (should be %lu)\n" "]\n", (unsigned long)talloc_get_size(s1), (unsigned long)sizeof(struct struct1)); return false; } if (strcmp(location1, talloc_get_name(s1)) != 0) { printf("failure: ptrtype [\n" "talloc_ptrtype() sets the wrong name '%s' (should be '%s')\n]\n", talloc_get_name(s1), location1); return false; } s2 = talloc_array_ptrtype(top, s2, 10);location2 = __location__; if (talloc_get_size(s2) != (sizeof(struct struct1) * 10)) { printf("failure: ptrtype [\n" "talloc_array_ptrtype() allocated the wrong size " "%lu (should be %lu)\n]\n", (unsigned long)talloc_get_size(s2), (unsigned long)(sizeof(struct struct1)*10)); return false; } if (strcmp(location2, talloc_get_name(s2)) != 0) { printf("failure: ptrtype [\n" "talloc_array_ptrtype() sets the wrong name '%s' (should be '%s')\n]\n", talloc_get_name(s2), location2); return false; } s3 = talloc_array_ptrtype(top, s3, 10);location3 = __location__; if (talloc_get_size(s3) != (sizeof(struct struct1 *) * 10)) { printf("failure: ptrtype [\n" "talloc_array_ptrtype() allocated the wrong size " "%lu (should be %lu)\n]\n", (unsigned long)talloc_get_size(s3), (unsigned long)(sizeof(struct struct1 *)*10)); return false; } torture_assert_str_equal("ptrtype", location3, talloc_get_name(s3), "talloc_array_ptrtype() sets the wrong name"); s4 = talloc_array_ptrtype(top, s4, 10);location4 = __location__; if (talloc_get_size(s4) != (sizeof(struct struct1 **) * 10)) { printf("failure: ptrtype [\n" "talloc_array_ptrtype() allocated the wrong size " "%lu (should be %lu)\n]\n", (unsigned long)talloc_get_size(s4), (unsigned long)(sizeof(struct struct1 **)*10)); return false; } torture_assert_str_equal("ptrtype", location4, talloc_get_name(s4), "talloc_array_ptrtype() sets the wrong name"); talloc_free(top); printf("success: ptrtype\n"); return true; } static int _test_talloc_free_in_destructor(void **ptr) { talloc_free(*ptr); return 0; } static bool test_talloc_free_in_destructor(void) { void *level0; void *level1; void *level2; void *level3; void *level4; void **level5; printf("test: free_in_destructor\n# TALLOC FREE IN DESTRUCTOR\n"); level0 = talloc_new(NULL); level1 = talloc_new(level0); level2 = talloc_new(level1); level3 = talloc_new(level2); level4 = talloc_new(level3); level5 = talloc(level4, void *); *level5 = level3; (void)talloc_reference(level0, level3); (void)talloc_reference(level3, level3); (void)talloc_reference(level5, level3); talloc_set_destructor(level5, _test_talloc_free_in_destructor); talloc_free(level1); talloc_free(level0); talloc_free(level3); /* make ASAN happy */ printf("success: free_in_destructor\n"); return true; } static bool test_autofree(void) { #if _SAMBA_BUILD_ < 4 /* autofree test would kill smbtorture */ void *p; printf("test: autofree\n# TALLOC AUTOFREE CONTEXT\n"); p = talloc_autofree_context(); talloc_free(p); p = talloc_autofree_context(); talloc_free(p); printf("success: autofree\n"); #endif return true; } static bool test_pool(void) { void *pool; 
void *p1, *p2, *p3, *p4; void *p2_2; pool = talloc_pool(NULL, 1024); p1 = talloc_size(pool, 80); memset(p1, 0x11, talloc_get_size(p1)); p2 = talloc_size(pool, 20); memset(p2, 0x11, talloc_get_size(p2)); p3 = talloc_size(p1, 50); memset(p3, 0x11, talloc_get_size(p3)); p4 = talloc_size(p3, 1000); memset(p4, 0x11, talloc_get_size(p4)); p2_2 = talloc_realloc_size(pool, p2, 20+1); torture_assert("pool realloc 20+1", p2_2 == p2, "failed: pointer changed"); memset(p2, 0x11, talloc_get_size(p2)); p2_2 = talloc_realloc_size(pool, p2, 20-1); torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); memset(p2, 0x11, talloc_get_size(p2)); p2_2 = talloc_realloc_size(pool, p2, 20-1); torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); memset(p2, 0x11, talloc_get_size(p2)); talloc_free(p3); /* this should reclaim the memory of p4 and p3 */ p2_2 = talloc_realloc_size(pool, p2, 400); torture_assert("pool realloc 400", p2_2 == p2, "failed: pointer changed"); memset(p2, 0x11, talloc_get_size(p2)); talloc_free(p1); /* this should reclaim the memory of p1 */ p2_2 = talloc_realloc_size(pool, p2, 800); torture_assert("pool realloc 800", p2_2 == p1, "failed: pointer not changed"); p2 = p2_2; memset(p2, 0x11, talloc_get_size(p2)); /* this should do a malloc */ p2_2 = talloc_realloc_size(pool, p2, 1800); torture_assert("pool realloc 1800", p2_2 != p2, "failed: pointer not changed"); p2 = p2_2; memset(p2, 0x11, talloc_get_size(p2)); /* this should reclaim the memory from the pool */ p3 = talloc_size(pool, 80); torture_assert("pool alloc 80", p3 == p1, "failed: pointer changed"); memset(p3, 0x11, talloc_get_size(p3)); talloc_free(p2); talloc_free(p3); p1 = talloc_size(pool, 80); memset(p1, 0x11, talloc_get_size(p1)); p2 = talloc_size(pool, 20); memset(p2, 0x11, talloc_get_size(p2)); talloc_free(p1); p2_2 = talloc_realloc_size(pool, p2, 20-1); torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); memset(p2, 0x11, talloc_get_size(p2)); p2_2 = talloc_realloc_size(pool, p2, 20-1); torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); memset(p2, 0x11, talloc_get_size(p2)); /* this should do a malloc */ p2_2 = talloc_realloc_size(pool, p2, 1800); torture_assert("pool realloc 1800", p2_2 != p2, "failed: pointer not changed"); p2 = p2_2; memset(p2, 0x11, talloc_get_size(p2)); /* this should reclaim the memory from the pool */ p3 = talloc_size(pool, 800); torture_assert("pool alloc 800", p3 == p1, "failed: pointer changed"); memset(p3, 0x11, talloc_get_size(p3)); talloc_free(pool); return true; } static bool test_pool_steal(void) { void *root; void *pool; void *p1, *p2; void *p1_2, *p2_2; size_t hdr; size_t ofs1, ofs2; root = talloc_new(NULL); pool = talloc_pool(root, 1024); p1 = talloc_size(pool, 4 * 16); torture_assert("pool allocate 4 * 16", p1 != NULL, "failed "); memset(p1, 0x11, talloc_get_size(p1)); p2 = talloc_size(pool, 4 * 16); torture_assert("pool allocate 4 * 16", p2 > p1, "failed: !(p2 > p1) "); memset(p2, 0x11, talloc_get_size(p2)); ofs1 = PTR_DIFF(p2, p1); hdr = ofs1 - talloc_get_size(p1); talloc_steal(root, p1); talloc_steal(root, p2); talloc_free(pool); p1_2 = p1; p1_2 = talloc_realloc_size(root, p1, 5 * 16); torture_assert("pool realloc 5 * 16", p1_2 > p2, "failed: pointer not changed"); memset(p1_2, 0x11, talloc_get_size(p1_2)); ofs1 = PTR_DIFF(p1_2, p2); ofs2 = talloc_get_size(p2) + hdr; torture_assert("pool realloc ", ofs1 == ofs2, "failed: pointer offset unexpected"); p2_2 = talloc_realloc_size(root, p2, 3 * 16); 
torture_assert("pool realloc 5 * 16", p2_2 == p2, "failed: pointer changed"); memset(p2_2, 0x11, talloc_get_size(p2_2)); talloc_free(p1_2); p2_2 = p2; /* now we should reclaim the full pool */ p2_2 = talloc_realloc_size(root, p2, 8 * 16); torture_assert("pool realloc 8 * 16", p2_2 == p1, "failed: pointer not expected"); p2 = p2_2; memset(p2_2, 0x11, talloc_get_size(p2_2)); /* now we malloc and free the full pool space */ p2_2 = talloc_realloc_size(root, p2, 2 * 1024); torture_assert("pool realloc 2 * 1024", p2_2 != p1, "failed: pointer not expected"); memset(p2_2, 0x11, talloc_get_size(p2_2)); talloc_free(p2_2); talloc_free(root); return true; } static bool test_pool_nest(void) { void *p1, *p2, *p3; void *e = talloc_new(NULL); p1 = talloc_pool(NULL, 1024); torture_assert("talloc_pool", p1 != NULL, "failed"); p2 = talloc_pool(p1, 500); torture_assert("talloc_pool", p2 != NULL, "failed"); p3 = talloc_size(p2, 10); talloc_steal(e, p3); talloc_free(p2); talloc_free(p3); talloc_free(p1); talloc_free(e); /* make ASAN happy */ return true; } struct pooled { char *s1; char *s2; char *s3; }; static bool test_pooled_object(void) { struct pooled *p; const char *s1 = "hello"; const char *s2 = "world"; const char *s3 = ""; p = talloc_pooled_object(NULL, struct pooled, 3, strlen(s1)+strlen(s2)+strlen(s3)+3); if (talloc_get_size(p) != sizeof(struct pooled)) { return false; } p->s1 = talloc_strdup(p, s1); TALLOC_FREE(p->s1); p->s1 = talloc_strdup(p, s2); TALLOC_FREE(p->s1); p->s1 = talloc_strdup(p, s1); p->s2 = talloc_strdup(p, s2); p->s3 = talloc_strdup(p, s3); TALLOC_FREE(p); return true; } static bool test_free_ref_null_context(void) { void *p1, *p2, *p3; int ret; talloc_disable_null_tracking(); p1 = talloc_new(NULL); p2 = talloc_new(NULL); p3 = talloc_reference(p2, p1); torture_assert("reference", p3 == p1, "failed: reference on null"); ret = talloc_free(p1); torture_assert("ref free with null parent", ret == 0, "failed: free with null parent"); talloc_free(p2); talloc_enable_null_tracking_no_autofree(); p1 = talloc_new(NULL); p2 = talloc_new(NULL); p3 = talloc_reference(p2, p1); torture_assert("reference", p3 == p1, "failed: reference on null"); ret = talloc_free(p1); torture_assert("ref free with null tracked parent", ret == 0, "failed: free with null parent"); talloc_free(p2); return true; } static bool test_rusty(void) { void *root; char *p1; talloc_enable_null_tracking(); root = talloc_new(NULL); p1 = talloc_strdup(root, "foo"); talloc_increase_ref_count(p1); talloc_report_full(root, stdout); talloc_free(root); CHECK_BLOCKS("null_context", NULL, 2); talloc_free(p1); /* make ASAN happy */ return true; } static bool test_free_children(void) { void *root; char *p1, *p2; const char *name, *name2; talloc_enable_null_tracking(); root = talloc_new(NULL); p1 = talloc_strdup(root, "foo1"); p2 = talloc_strdup(p1, "foo2"); (void)p2; talloc_set_name(p1, "%s", "testname"); talloc_free_children(p1); /* check its still a valid talloc ptr */ talloc_get_size(talloc_get_name(p1)); if (strcmp(talloc_get_name(p1), "testname") != 0) { return false; } talloc_set_name(p1, "%s", "testname"); name = talloc_get_name(p1); talloc_free_children(p1); /* check its still a valid talloc ptr */ talloc_get_size(talloc_get_name(p1)); torture_assert("name", name == talloc_get_name(p1), "name ptr changed"); torture_assert("namecheck", strcmp(talloc_get_name(p1), "testname") == 0, "wrong name"); CHECK_BLOCKS("name1", p1, 2); /* note that this does not free the old child name */ talloc_set_name_const(p1, "testname2"); name2 = 
talloc_get_name(p1); /* but this does */ talloc_free_children(p1); (void)name2; torture_assert("namecheck", strcmp(talloc_get_name(p1), "testname2") == 0, "wrong name"); CHECK_BLOCKS("name1", p1, 1); talloc_report_full(root, stdout); talloc_free(root); return true; } static bool test_memlimit(void) { void *root; char *l1, *l2, *l3, *l4, *l5, *t; char *pool; int i; printf("test: memlimit\n# MEMORY LIMITS\n"); printf("==== talloc_new(NULL)\n"); root = talloc_new(NULL); talloc_report_full(root, stdout); printf("==== talloc_size(root, 2048)\n"); l1 = talloc_size(root, 2048); torture_assert("memlimit", l1 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_free(l1)\n"); talloc_free(l1); talloc_report_full(root, stdout); printf("==== talloc_strdup(root, level 1)\n"); l1 = talloc_strdup(root, "level 1"); torture_assert("memlimit", l1 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_set_memlimit(l1, 2048)\n"); torture_assert("memlimit", talloc_set_memlimit(l1, 2048) == 0, "failed: setting memlimit should never fail\n"); talloc_report_full(root, stdout); printf("==== talloc_size(root, 2048)\n"); l2 = talloc_size(l1, 2048); torture_assert("memlimit", l2 == NULL, "failed: alloc should fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_strdup(l1, level 2)\n"); l2 = talloc_strdup(l1, "level 2"); torture_assert("memlimit", l2 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_free(l2)\n"); talloc_free(l2); talloc_report_full(root, stdout); printf("==== talloc_size(NULL, 2048)\n"); l2 = talloc_size(NULL, 2048); talloc_report_full(root, stdout); printf("==== talloc_steal(l1, l2)\n"); talloc_steal(l1, l2); talloc_report_full(root, stdout); printf("==== talloc_strdup(l2, level 3)\n"); l3 = talloc_strdup(l2, "level 3"); torture_assert("memlimit", l3 == NULL, "failed: alloc should fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_free(l2)\n"); talloc_free(l2); talloc_report_full(root, stdout); printf("==== talloc_strdup(NULL, level 2)\n"); l2 = talloc_strdup(NULL, "level 2"); talloc_steal(l1, l2); talloc_report_full(root, stdout); printf("==== talloc_strdup(l2, level 3)\n"); l3 = talloc_strdup(l2, "level 3"); torture_assert("memlimit", l3 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_set_memlimit(l3, 1024)\n"); torture_assert("memlimit", talloc_set_memlimit(l3, 1024) == 0, "failed: setting memlimit should never fail\n"); talloc_report_full(root, stdout); printf("==== talloc_strdup(l3, level 4)\n"); l4 = talloc_strdup(l3, "level 4"); torture_assert("memlimit", l4 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_set_memlimit(l4, 512)\n"); torture_assert("memlimit", talloc_set_memlimit(l4, 512) == 0, "failed: setting memlimit should never fail\n"); talloc_report_full(root, stdout); printf("==== talloc_strdup(l4, level 5)\n"); l5 = talloc_strdup(l4, "level 5"); torture_assert("memlimit", l5 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_realloc(NULL, l5, char, 600)\n"); t = talloc_realloc(NULL, l5, char, 600); torture_assert("memlimit", t == NULL, "failed: alloc should fail due to memory limit\n"); talloc_report_full(root, stdout); 
printf("==== talloc_realloc(NULL, l5, char, 5)\n"); l5 = talloc_realloc(NULL, l5, char, 5); torture_assert("memlimit", l5 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_strdup(l3, level 4)\n"); l4 = talloc_strdup(l3, "level 4"); torture_assert("memlimit", l4 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== talloc_set_memlimit(l4, 512)\n"); torture_assert("memlimit", talloc_set_memlimit(l4, 512) == 0, "failed: setting memlimit should never fail\n"); talloc_report_full(root, stdout); printf("==== talloc_strdup(l4, level 5)\n"); l5 = talloc_strdup(l4, "level 5"); torture_assert("memlimit", l5 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); printf("==== Make new temp context and steal l5\n"); t = talloc_new(root); talloc_steal(t, l5); talloc_report_full(root, stdout); printf("==== talloc_size(t, 2048)\n"); l1 = talloc_size(t, 2048); torture_assert("memlimit", l1 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(root, stdout); talloc_free(root); /* Test memlimits with pools. */ printf("==== talloc_pool(NULL, 10*1024)\n"); pool = talloc_pool(NULL, 10*1024); torture_assert("memlimit", pool != NULL, "failed: alloc should not fail due to memory limit\n"); printf("==== talloc_set_memlimit(pool, 10*1024)\n"); talloc_set_memlimit(pool, 10*1024); for (i = 0; i < 9; i++) { printf("==== talloc_size(pool, 1024) %i/10\n", i + 1); l1 = talloc_size(pool, 1024); torture_assert("memlimit", l1 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(pool, stdout); } /* The next alloc should fail. */ printf("==== talloc_size(pool, 1024) 10/10\n"); l2 = talloc_size(pool, 1024); torture_assert("memlimit", l2 == NULL, "failed: alloc should fail due to memory limit\n"); talloc_report_full(pool, stdout); /* Moving one of the children shouldn't change the limit, as it's still inside the pool. */ printf("==== talloc_new(NULL)\n"); root = talloc_new(NULL); printf("==== talloc_steal(root, l1)\n"); talloc_steal(root, l1); printf("==== talloc_size(pool, 1024)\n"); l2 = talloc_size(pool, 1024); torture_assert("memlimit", l2 == NULL, "failed: alloc should fail due to memory limit\n"); printf("==== talloc_free_children(pool)\n"); talloc_free(l1); talloc_free_children(pool); printf("==== talloc_size(pool, 1024)\n"); l1 = talloc_size(pool, 1024); /* try reallocs of increasing size */ for (i = 1; i < 9; i++) { printf("==== talloc_realloc_size(NULL, l1, %i*1024) %i/10\n", i, i + 1); l1 = talloc_realloc_size(NULL, l1, i*1024); torture_assert("memlimit", l1 != NULL, "failed: realloc should not fail due to memory limit\n"); talloc_report_full(pool, stdout); } /* The next alloc should fail. 
*/ printf("==== talloc_realloc_size(NULL, l1, 10*1024) 10/10\n"); l2 = talloc_realloc_size(NULL, l1, 10*1024); torture_assert("memlimit", l2 == NULL, "failed: realloc should fail due to memory limit\n"); /* Increase the memlimit */ printf("==== talloc_set_memlimit(pool, 11*1024)\n"); talloc_set_memlimit(pool, 11*1024); /* The final realloc should still fail as the entire realloced chunk needs to be moved out of the pool */ printf("==== talloc_realloc_size(NULL, l1, 10*1024) 10/10\n"); l2 = talloc_realloc_size(NULL, l1, 10*1024); torture_assert("memlimit", l2 == NULL, "failed: realloc should fail due to memory limit\n"); talloc_report_full(pool, stdout); printf("==== talloc_set_memlimit(pool, 21*1024)\n"); talloc_set_memlimit(pool, 21*1024); /* There's now sufficient space to move the chunk out of the pool */ printf("==== talloc_realloc_size(NULL, l1, 10*1024) 10/10\n"); l2 = talloc_realloc_size(NULL, l1, 10*1024); torture_assert("memlimit", l2 != NULL, "failed: realloc should not fail due to memory limit\n"); talloc_report_full(pool, stdout); /* ...which should mean smaller allocations can now occur within the pool */ printf("==== talloc_size(pool, 9*1024)\n"); l1 = talloc_size(pool, 9*1024); torture_assert("memlimit", l1 != NULL, "failed: new allocations should be allowed in the pool\n"); talloc_report_full(pool, stdout); /* But reallocs bigger than the pool will still fail */ printf("==== talloc_realloc_size(NULL, l1, 10*1024)\n"); l2 = talloc_realloc_size(NULL, l1, 10*1024); torture_assert("memlimit", l2 == NULL, "failed: realloc should fail due to memory limit\n"); talloc_report_full(pool, stdout); /* ..as well as allocs */ printf("==== talloc_size(pool, 1024)\n"); l1 = talloc_size(pool, 1024); torture_assert("memlimit", l1 == NULL, "failed: alloc should fail due to memory limit\n"); talloc_report_full(pool, stdout); printf("==== talloc_free_children(pool)\n"); talloc_free_children(pool); printf("==== talloc_set_memlimit(pool, 1024)\n"); talloc_set_memlimit(pool, 1024); /* We should still be able to allocate up to the pool limit because the memlimit only applies to new heap allocations */ printf("==== talloc_size(pool, 9*1024)\n"); l1 = talloc_size(pool, 9*1024); torture_assert("memlimit", l1 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(pool, stdout); l1 = talloc_size(pool, 1024); torture_assert("memlimit", l1 == NULL, "failed: alloc should fail due to memory limit\n"); talloc_report_full(pool, stdout); printf("==== talloc_free_children(pool)\n"); talloc_free_children(pool); printf("==== talloc_set_memlimit(pool, 10*1024)\n"); talloc_set_memlimit(pool, 10*1024); printf("==== talloc_size(pool, 1024)\n"); l1 = talloc_size(pool, 1024); torture_assert("memlimit", l1 != NULL, "failed: alloc should not fail due to memory limit\n"); talloc_report_full(pool, stdout); talloc_free(pool); talloc_free(root); printf("success: memlimit\n"); return true; } #ifdef HAVE_PTHREAD #define NUM_THREADS 100 /* Sync variables. */ static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER; static void *intermediate_ptr; /* Subthread. */ static void *thread_fn(void *arg) { int ret; const char *ctx_name = (const char *)arg; void *sub_ctx = NULL; /* * Do stuff that creates a new talloc hierarchy in * this thread. 
*/ void *top_ctx = talloc_named_const(NULL, 0, "top"); if (top_ctx == NULL) { return NULL; } sub_ctx = talloc_named_const(top_ctx, 100, ctx_name); if (sub_ctx == NULL) { return NULL; } /* * Now transfer a pointer from our hierarchy * onto the intermediate ptr. */ ret = pthread_mutex_lock(&mtx); if (ret != 0) { talloc_free(top_ctx); return NULL; } /* Wait for intermediate_ptr to be free. */ while (intermediate_ptr != NULL) { ret = pthread_cond_wait(&condvar, &mtx); if (ret != 0) { talloc_free(top_ctx); ret = pthread_mutex_unlock(&mtx); assert(ret == 0); return NULL; } } /* and move our memory onto it from our toplevel hierarchy. */ intermediate_ptr = talloc_move(NULL, &sub_ctx); /* Tell the main thread it's ready for pickup. */ pthread_cond_broadcast(&condvar); ret = pthread_mutex_unlock(&mtx); assert(ret == 0); talloc_free(top_ctx); return NULL; } /* Main thread. */ static bool test_pthread_talloc_passing(void) { int i; int ret; char str_array[NUM_THREADS][20]; pthread_t thread_id; void *mem_ctx; /* * Important ! Null tracking breaks threaded talloc. * It *must* be turned off. */ talloc_disable_null_tracking(); printf("test: pthread_talloc_passing\n# PTHREAD TALLOC PASSING\n"); /* Main thread toplevel context. */ mem_ctx = talloc_named_const(NULL, 0, "toplevel"); if (mem_ctx == NULL) { printf("failed to create toplevel context\n"); return false; } /* * Spin off NUM_THREADS threads. * They will use their own toplevel contexts. */ for (i = 0; i < NUM_THREADS; i++) { ret = snprintf(str_array[i], 20, "thread:%d", i); if (ret < 0) { printf("snprintf %d failed\n", i); return false; } ret = pthread_create(&thread_id, NULL, thread_fn, str_array[i]); if (ret != 0) { printf("failed to create thread %d (%d)\n", i, ret); return false; } } printf("Created %d threads\n", NUM_THREADS); /* Now wait for NUM_THREADS transfers of the talloc'ed memory. */ for (i = 0; i < NUM_THREADS; i++) { ret = pthread_mutex_lock(&mtx); if (ret != 0) { printf("pthread_mutex_lock %d failed (%d)\n", i, ret); talloc_free(mem_ctx); return false; } /* Wait for intermediate_ptr to have our data. */ while (intermediate_ptr == NULL) { ret = pthread_cond_wait(&condvar, &mtx); if (ret != 0) { printf("pthread_cond_wait %d failed (%d)\n", i, ret); talloc_free(mem_ctx); ret = pthread_mutex_unlock(&mtx); assert(ret == 0); } } /* and move it onto our toplevel hierarchy. */ (void)talloc_move(mem_ctx, &intermediate_ptr); /* Tell the sub-threads we're ready for another. */ pthread_cond_broadcast(&condvar); ret = pthread_mutex_unlock(&mtx); assert(ret == 0); } CHECK_SIZE("pthread_talloc_passing", mem_ctx, NUM_THREADS * 100); #if 1 /* Dump the hierarchy. */ talloc_report(mem_ctx, stdout); #endif talloc_free(mem_ctx); printf("success: pthread_talloc_passing\n"); return true; } #endif static void test_magic_protection_abort(const char *reason) { /* exit with errcode 42 to communicate successful test to the parent process */ if (strcmp(reason, "Bad talloc magic value - unknown value") == 0) { _exit(42); } else { printf("talloc aborted for an unexpected reason\n"); } } static int test_magic_protection_destructor(int *ptr) { _exit(404); /* Not 42 */ } static bool test_magic_protection(void) { void *pool = talloc_pool(NULL, 1024); int *p1, *p2; pid_t pid; int exit_status; printf("test: magic_protection\n"); p1 = talloc(pool, int); p2 = talloc(pool, int); /* To avoid complaints from the compiler assign values to the p1 & p2. 
*/ *p1 = 6; *p2 = 9; pid = fork(); if (pid == 0) { talloc_set_abort_fn(test_magic_protection_abort); talloc_set_destructor(p2, test_magic_protection_destructor); /* * Simulate a security attack * by triggering a buffer overflow in memset to overwrite the * constructor in the next pool chunk. * * Real attacks would attempt to set a real destructor. */ memset(p1, '\0', 32); /* Then the attack takes effect when the memory's freed. */ talloc_free(pool); /* Never reached. Make compilers happy */ return true; } while (wait(&exit_status) != pid); talloc_free(pool); /* make ASAN happy */ if (!WIFEXITED(exit_status)) { printf("Child exited through unexpected abnormal means\n"); return false; } if (WEXITSTATUS(exit_status) != 42) { printf("Child exited with wrong exit status\n"); return false; } if (WIFSIGNALED(exit_status)) { printf("Child received unexpected signal\n"); return false; } printf("success: magic_protection\n"); return true; } static void test_magic_free_protection_abort(const char *reason) { /* exit with errcode 42 to communicate successful test to the parent process */ if (strcmp(reason, "Bad talloc magic value - access after free") == 0) { _exit(42); } /* not 42 */ _exit(404); } static bool test_magic_free_protection(void) { void *pool = talloc_pool(NULL, 1024); int *p1, *p2, *p3; pid_t pid; int exit_status; printf("test: magic_free_protection\n"); p1 = talloc(pool, int); p2 = talloc(pool, int); /* To avoid complaints from the compiler assign values to the p1 & p2. */ *p1 = 6; *p2 = 9; p3 = talloc_realloc(pool, p2, int, 2048); torture_assert("pool realloc 2048", p3 != p2, "failed: pointer not changed"); /* * Now access the memory in the pool after the realloc(). It * should be marked as free, so use of the old pointer should * trigger the abort function */ pid = fork(); if (pid == 0) { talloc_set_abort_fn(test_magic_free_protection_abort); talloc_get_name(p2); /* Never reached. 
Make compilers happy */ return true; } while (wait(&exit_status) != pid); if (!WIFEXITED(exit_status)) { printf("Child exited through unexpected abnormal means\n"); return false; } if (WEXITSTATUS(exit_status) != 42) { printf("Child exited with wrong exit status\n"); return false; } if (WIFSIGNALED(exit_status)) { printf("Child received unexpected signal\n"); return false; } talloc_free(pool); printf("success: magic_free_protection\n"); return true; } static void test_reset(void) { talloc_set_log_fn(test_log_stdout); test_abort_stop(); talloc_disable_null_tracking(); talloc_enable_null_tracking_no_autofree(); } bool torture_local_talloc(struct torture_context *tctx) { bool ret = true; setlinebuf(stdout); test_reset(); ret &= test_pooled_object(); test_reset(); ret &= test_pool_nest(); test_reset(); ret &= test_ref1(); test_reset(); ret &= test_ref2(); test_reset(); ret &= test_ref3(); test_reset(); ret &= test_ref4(); test_reset(); ret &= test_unlink1(); test_reset(); ret &= test_misc(); test_reset(); ret &= test_realloc(); test_reset(); ret &= test_realloc_child(); test_reset(); ret &= test_steal(); test_reset(); ret &= test_move(); test_reset(); ret &= test_unref_reparent(); test_reset(); ret &= test_realloc_fn(); test_reset(); ret &= test_type(); test_reset(); ret &= test_lifeless(); test_reset(); ret &= test_loop(); test_reset(); ret &= test_free_parent_deny_child(); test_reset(); ret &= test_realloc_on_destructor_parent(); test_reset(); ret &= test_free_parent_reparent_child(); test_reset(); ret &= test_free_parent_reparent_child_in_pool(); test_reset(); ret &= test_talloc_ptrtype(); test_reset(); ret &= test_talloc_free_in_destructor(); test_reset(); ret &= test_pool(); test_reset(); ret &= test_pool_steal(); test_reset(); ret &= test_free_ref_null_context(); test_reset(); ret &= test_rusty(); test_reset(); ret &= test_free_children(); test_reset(); ret &= test_memlimit(); #ifdef HAVE_PTHREAD test_reset(); ret &= test_pthread_talloc_passing(); #endif if (ret) { test_reset(); ret &= test_speed(); } test_reset(); ret &= test_autofree(); test_reset(); ret &= test_magic_protection(); test_reset(); ret &= test_magic_free_protection(); test_reset(); talloc_disable_null_tracking(); return ret; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/testsuite_main.c0000660000000000000000000000207100000000000020260 0ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. local testing of talloc routines. Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "talloc_testsuite.h" int main(void) { bool ret = torture_local_talloc(NULL); if (!ret) return -1; return 0; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615788.0132055 tevent-0.11.0/lib/talloc/web/index.html0000660000000000000000000000313000000000000017626 0ustar00rootroot00000000000000 talloc

talloc

talloc is a hierarchical, pool-based memory allocator with destructors. It is the core memory allocator used in Samba, and has made a huge difference in many aspects of Samba4 development.
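For readers new to the library, here is a minimal sketch of the hierarchy and destructor behaviour described above. It is an illustrative example only, using public talloc calls that the testsuite in this tree also exercises (talloc_new, talloc_strdup, talloc_named_const, talloc_set_destructor, talloc_get_name, talloc_free):

  #include <stdio.h>
  #include <talloc.h>

  /* called just before "db" is freed; returning 0 lets the free proceed */
  static int db_destructor(void *db)
  {
      printf("closing %s\n", talloc_get_name(db));
      return 0;
  }

  int main(void)
  {
      /* a top-level context: freeing it frees every child below it */
      void *root = talloc_new(NULL);

      /* children are attached to a parent and released along with it */
      char *name = talloc_strdup(root, "example");
      void *db = talloc_named_const(root, 16, "db handle");
      talloc_set_destructor(db, db_destructor);

      printf("%s: %zu bytes in %zu blocks\n", name,
             talloc_total_size(root), talloc_total_blocks(root));

      /* frees the whole tree in one call, running db_destructor on the way */
      talloc_free(root);
      return 0;
  }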

To get started with talloc, I would recommend you read the talloc guide.
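As a quick taste of the pool allocator mentioned above, the sketch below reserves a pool up front and carves child allocations out of it, in the same way the pool tests in the testsuite do; it is illustrative only:

  #include <talloc.h>

  int main(void)
  {
      /* reserve 1024 bytes up front; children of the pool are carved
         out of that buffer instead of calling malloc() each time */
      void *pool = talloc_pool(NULL, 1024);

      void *buf = talloc_size(pool, 80);
      char *str = talloc_strdup(pool, "pooled string");
      (void)buf;
      (void)str;

      /* freeing the pool releases all of its children at once */
      talloc_free(pool);
      return 0;
  }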

Download

You can download the latest releases of talloc from the talloc directory on the Samba public source archive.

Discussion and bug reports

talloc does not currently have its own mailing list or bug tracking system. For now, please use the samba-technical mailing list and the Samba Bugzilla bug tracking system.

Development

You can download the latest code either via git or rsync.

To fetch via git, see the following guide:
Using Git for Samba Development
Once you have cloned the tree, switch to the master branch and cd into the lib/talloc directory.

To fetch via rsync, use this command:
  rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/talloc .

Andrew Tridgell
talloc AT tridgell.net
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4297116 tevent-0.11.0/lib/talloc/wscript0000660000000000000000000001556600000000000016512 0ustar00rootroot00000000000000#!/usr/bin/env python APPNAME = 'talloc' VERSION = '2.3.2' import os import sys # find the buildtools directory top = '.' while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: top = top + '/..' sys.path.insert(0, top + '/buildtools/wafsamba') out = 'bin' import wafsamba from wafsamba import samba_dist, samba_utils from waflib import Logs, Options, Context # setup what directories to put in a tarball samba_dist.DIST_DIRS("""lib/talloc:. lib/replace:lib/replace buildtools:buildtools third_party/waf:third_party/waf""") def options(opt): opt.BUILTIN_DEFAULT('replace') opt.PRIVATE_EXTENSION_DEFAULT('talloc', noextension='talloc') opt.RECURSE('lib/replace') if opt.IN_LAUNCH_DIR(): opt.add_option('--enable-talloc-compat1', help=("Build talloc 1.x.x compat library [False]"), action="store_true", dest='TALLOC_COMPAT1', default=False) def configure(conf): conf.RECURSE('lib/replace') conf.env.standalone_talloc = conf.IN_LAUNCH_DIR() conf.define('TALLOC_BUILD_VERSION_MAJOR', int(VERSION.split('.')[0])) conf.define('TALLOC_BUILD_VERSION_MINOR', int(VERSION.split('.')[1])) conf.define('TALLOC_BUILD_VERSION_RELEASE', int(VERSION.split('.')[2])) conf.env.TALLOC_COMPAT1 = False if conf.env.standalone_talloc: conf.env.TALLOC_COMPAT1 = Options.options.TALLOC_COMPAT1 conf.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig' conf.env.TALLOC_VERSION = VERSION conf.CHECK_XSLTPROC_MANPAGES() conf.CHECK_HEADERS('sys/auxv.h') conf.CHECK_FUNCS('getauxval') conf.SAMBA_CONFIG_H() conf.SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS() conf.SAMBA_CHECK_PYTHON() conf.SAMBA_CHECK_PYTHON_HEADERS() if not conf.env.standalone_talloc: if conf.CHECK_BUNDLED_SYSTEM_PKG('talloc', minversion=VERSION, implied_deps='replace'): conf.define('USING_SYSTEM_TALLOC', 1) if conf.env.disable_python: using_system_pytalloc_util = False else: using_system_pytalloc_util = True name = 'pytalloc-util' + conf.all_envs['default']['PYTHON_SO_ABI_FLAG'] if not conf.CHECK_BUNDLED_SYSTEM_PKG(name, minversion=VERSION, implied_deps='talloc replace'): using_system_pytalloc_util = False if using_system_pytalloc_util: conf.define('USING_SYSTEM_PYTALLOC_UTIL', 1) def build(bld): bld.RECURSE('lib/replace') if bld.env.standalone_talloc: private_library = False # should we also install the symlink to libtalloc1.so here? 
bld.SAMBA_LIBRARY('talloc-compat1-%s' % (VERSION), 'compat/talloc_compat1.c', public_deps='talloc', soname='libtalloc.so.1', pc_files=[], public_headers=[], enabled=bld.env.TALLOC_COMPAT1) testsuite_deps = 'talloc' if bld.CONFIG_SET('HAVE_PTHREAD'): testsuite_deps += ' pthread' bld.SAMBA_BINARY('talloc_testsuite', 'testsuite_main.c testsuite.c', testsuite_deps, install=False) bld.SAMBA_BINARY('talloc_test_magic_differs_helper', 'test_magic_differs_helper.c', 'talloc', install=False) else: private_library = True if not bld.CONFIG_SET('USING_SYSTEM_TALLOC'): bld.SAMBA_LIBRARY('talloc', 'talloc.c', deps='replace', abi_directory='ABI', abi_match='talloc* _talloc*', hide_symbols=True, vnum=VERSION, public_headers=('' if private_library else 'talloc.h'), pc_files='talloc.pc', public_headers_install=not private_library, private_library=private_library, manpages='man/talloc.3') if not bld.CONFIG_SET('USING_SYSTEM_PYTALLOC_UTIL'): name = bld.pyembed_libname('pytalloc-util') bld.SAMBA_LIBRARY(name, source='pytalloc_util.c', public_deps='talloc', pyembed=True, vnum=VERSION, hide_symbols=True, abi_directory='ABI', abi_match='pytalloc_* _pytalloc_*', private_library=private_library, public_headers=('' if private_library else 'pytalloc.h'), pc_files='pytalloc-util.pc', enabled=bld.PYTHON_BUILD_IS_ENABLED() ) bld.SAMBA_PYTHON('pytalloc', 'pytalloc.c', deps='talloc ' + name, enabled=bld.PYTHON_BUILD_IS_ENABLED(), realname='talloc.so') bld.SAMBA_PYTHON('test_pytalloc', 'test_pytalloc.c', deps=name, enabled=bld.PYTHON_BUILD_IS_ENABLED(), realname='_test_pytalloc.so', install=False) def testonly(ctx): '''run talloc testsuite''' import samba_utils samba_utils.ADD_LD_LIBRARY_PATH('bin/shared') samba_utils.ADD_LD_LIBRARY_PATH('bin/shared/private') cmd = os.path.join(Context.g_module.out, 'talloc_testsuite') ret = samba_utils.RUN_COMMAND(cmd) print("testsuite returned %d" % ret) magic_helper_cmd = os.path.join(Context.g_module.out, 'talloc_test_magic_differs_helper') magic_cmd = os.path.join(Context.g_module.top, 'lib', 'talloc', 'test_magic_differs.sh') if not os.path.exists(magic_cmd): magic_cmd = os.path.join(Context.g_module.top, 'test_magic_differs.sh') magic_ret = samba_utils.RUN_COMMAND(magic_cmd + " " + magic_helper_cmd) print("magic differs test returned %d" % magic_ret) pyret = samba_utils.RUN_PYTHON_TESTS(['test_pytalloc.py']) print("python testsuite returned %d" % pyret) sys.exit(ret or magic_ret or pyret) # WAF doesn't build the unit tests for this, maybe because they don't link with talloc? 
# This forces it def test(ctx): Options.commands.append('build') Options.commands.append('testonly') def dist(): '''makes a tarball for distribution''' samba_dist.dist() def reconfigure(ctx): '''reconfigure if config scripts have changed''' samba_utils.reconfigure(ctx) def pydoctor(ctx): '''build python apidocs''' cmd='PYTHONPATH=bin/python pydoctor --project-name=talloc --project-url=http://talloc.samba.org/ --make-html --docformat=restructuredtext --introspect-c-modules --add-module bin/python/talloc.*' print("Running: %s" % cmd) os.system(cmd) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/README0000660000000000000000000000056200000000000016076 0ustar00rootroot00000000000000See http://code.google.com/p/waf/ for more information on waf You can get a svn copy of the upstream source with: svn checkout http://waf.googlecode.com/svn/trunk/ waf-read-only Samba currently uses waf 1.5, which can be found at: http://waf.googlecode.com/svn/branches/waf-1.5 To update the current copy of waf, use the update-waf.sh script in this directory. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1116025 tevent-0.11.0/buildtools/bin/waf0000770000000000000000000001037400000000000016472 0ustar00rootroot00000000000000#!/usr/bin/env python3 # encoding: latin-1 # Thomas Nagy, 2005-2018 # """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" import os, sys, inspect VERSION="2.0.21" REVISION="x" GIT="x" INSTALL="x" C1='x' C2='x' C3='x' cwd = os.getcwd() join = os.path.join if sys.hexversion<0x206000f: raise ImportError('Python >= 2.6 is required to create the waf file') WAF='waf' def b(x): return x if sys.hexversion>0x300000f: WAF='waf3' def b(x): return x.encode() def err(m): print(('\033[91mError: %s\033[0m' % m)) sys.exit(1) def unpack_wafdir(dir, src): f = open(src,'rb') c = 'corrupt archive (%d)' while 1: line = f.readline() if not line: err('run waf-light from a folder containing waflib') if line == b('#==>\n'): txt = f.readline() if not txt: err(c % 1) if f.readline() != b('#<==\n'): err(c % 2) break if not txt: err(c % 3) txt = txt[1:-1].replace(b(C1), b('\n')).replace(b(C2), b('\r')).replace(b(C3), b('\x00')) import shutil, tarfile try: shutil.rmtree(dir) except OSError: pass try: for x in ('Tools', 'extras'): os.makedirs(join(dir, 'waflib', x)) except OSError: err("Cannot unpack waf lib into %s\nMove waf in a writable directory" % dir) os.chdir(dir) tmp = 't.bz2' t = open(tmp,'wb') try: t.write(txt) finally: t.close() try: t = tarfile.open(tmp) except: try: os.system('bunzip2 t.bz2') t = tarfile.open('t') tmp = 't' except: os.chdir(cwd) try: shutil.rmtree(dir) except OSError: pass err("Waf cannot be unpacked, check that bzip2 support is present") try: for x in t: t.extract(x) finally: t.close() for x in ('Tools', 'extras'): os.chmod(join('waflib',x), 493) if sys.hexversion<0x300000f: sys.path = [join(dir, 'waflib')] + sys.path import fixpy2 fixpy2.fixdir(dir) os.remove(tmp) os.chdir(cwd) try: dir = unicode(dir, 'mbcs') except: pass try: from ctypes import windll windll.kernel32.SetFileAttributesW(dir, 2) except: pass def test(dir): try: os.stat(join(dir, 'waflib')) return os.path.abspath(dir) except OSError: pass def find_lib(): path = '../../third_party/waf' paths = [path, path+'/waflib'] return [os.path.abspath(os.path.join(os.path.dirname(__file__), x)) for x in paths] wafdir = find_lib() for p in wafdir: sys.path.insert(0, p) if __name__ == '__main__': #import extras.compat15#PRELUDE import sys from waflib.Tools import ccroot, c, ar, compiler_c, gcc sys.modules['cc'] = c sys.modules['ccroot'] = ccroot sys.modules['ar'] = ar sys.modules['compiler_cc'] = compiler_c sys.modules['gcc'] = gcc from waflib import Options Options.lockfile = os.environ.get('WAFLOCK', '.lock-wscript') if os.path.isfile(Options.lockfile) and os.stat(Options.lockfile).st_size == 0: os.environ['NOCLIMB'] = "1" # there is a single top-level, but libraries must build independently os.environ['NO_LOCK_IN_TOP'] = "1" from waflib import Task class o(object): display = None Task.classes['cc_link'] = o from waflib import Scripting Scripting.waf_entry_point(cwd, VERSION, wafdir[0]) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/compare_config_h4.sh0000770000000000000000000000050200000000000021114 0ustar00rootroot00000000000000#!/bin/sh # compare the generated config.h from a waf build with existing samba # build grep "^.define" bin/default/source4/include/config.h | sort > waf-config.h grep "^.define" $HOME/samba_old/source4/include/config.h | sort > old-config.h comm -23 old-config.h waf-config.h #echo #diff -u old-config.h waf-config.h ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/compare_generated.sh0000770000000000000000000000251200000000000021215 
0ustar00rootroot00000000000000#!/bin/sh # compare the generated files from a waf old_build=$HOME/samba_old gen_files=$(cd bin/default && find . -type f -name '*.[ch]') 2>&1 strip_file() { in_file=$1 out_file=$2 cat $in_file | grep -v 'The following definitions come from' | grep -v 'Automatically generated at' | grep -v 'Generated from' | sed 's|/home/tnagy/samba/source4||g' | sed 's|/home/tnagy/samba/|../|g' | sed 's|bin/default/source4/||g' | sed 's|bin/default/|../|g' | sed 's/define _____/define ___/g' | sed 's/define __*/define _/g' | sed 's/define _DEFAULT_/define _/g' | sed 's/define _SOURCE4_/define ___/g' | sed 's/define ___/define _/g' | sed 's/ifndef ___/ifndef _/g' | sed 's|endif /* ____|endif /* __|g' | sed s/__DEFAULT_SOURCE4/__/ | sed s/__DEFAULT_SOURCE4/__/ | sed s/__DEFAULT/____/ > $out_file } compare_file() { f=$f bname=$(basename $f) t1=/tmp/$bname.old.$$ t2=/tmp/$bname.new.$$ strip_file $old_build/$f $t1 strip_file bin/default/$f $t2 diff -u -b $t1 $t2 2>&1 rm -f $t1 $t2 } for f in $gen_files; do compare_file $f done ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/compare_install.sh0000770000000000000000000000021200000000000020720 0ustar00rootroot00000000000000#!/bin/sh prefix1="$1" prefix2="$2" (cd $prefix1 && find . ) | sort > p1.txt (cd $prefix2 && find . ) | sort > p2.txt diff -u p[12].txt ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/examples/run_on_target.py0000770000000000000000000001162100000000000022254 0ustar00rootroot00000000000000#!/usr/bin/env python3 # # Sample run-on-target script # This is a script that can be used as cross-execute parameter to samba # configuration process, running the command on a remote target for which # the cross-compiled configure test was compiled. # # To use: # ./configure \ # --cross-compile \ # '--cross-execute=./buildtools/example/run_on_target.py --host=' # # A more elaborate example: # ./configure \ # --cross-compile \ # '--cross-execute=./buildtools/example/run_on_target.py --host= --user= "--ssh=ssh -i " --destdir=/path/to/dir' # # Typically this is to be used also with --cross-answers, so that the # cross answers file gets built and further builds can be made without # the help of a remote target. # # The following assumptions are made: # 1. rsync is available on build machine and target machine # 2. A running ssh service on target machine with password-less shell login # 3. A directory writable by the password-less login user # 4. The tests on the target can run and provide reliable results # from the login account's home directory. This is significant # for example in locking tests which # create files in the current directory. As a workaround to this # assumption, the TESTDIR environment variable can be set on the target # (using ssh command line or server config) and the tests shall # chdir to that directory. # import sys import os import subprocess from optparse import OptionParser # those are defaults, but can be overidden using command line SSH = 'ssh' USER = None HOST = 'localhost' def xfer_files(ssh, srcdir, host, user, targ_destdir): """Transfer executable files to target Use rsync to copy the directory containing program to run INTO a destination directory on the target. An exact copy of the source directory is created on the target machine, possibly deleting files on the target machine which do not exist on the source directory. 
The idea is that the test may include files in addition to the compiled binary, and all of those files reside alongside the binary in a source directory. For example, if the test to run is /foo/bar/test and the destination directory on the target is /tbaz, then /tbaz/bar on the target shall be an exact copy of /foo/bar on the source, including deletion of files inside /tbaz/bar which do not exist on the source. """ userhost = host if user: userhost = '%s@%s' % (user, host) cmd = 'rsync --verbose -rl --ignore-times --delete -e "%s" %s %s:%s/' % \ (ssh, srcdir, userhost, targ_destdir) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() if p.returncode != 0: raise Exception('failed syncing files\n stdout:\n%s\nstderr:%s\n' % (out, err)) def exec_remote(ssh, host, user, destdir, targdir, prog, args): """Run a test on the target Using password-less ssh, run the compiled binary on the target. An assumption is that there's no need to cd into the target dir, same as there's no need to do it on a native build. """ userhost = host if user: userhost = '%s@%s' % (user, host) cmd = '%s %s %s/%s/%s' % (ssh, userhost, destdir, targdir, prog) if args: cmd = cmd + ' ' + ' '.join(args) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() return (p.returncode, out) def main(argv): usage = "usage: %prog [options] [args]" parser = OptionParser(usage) parser.add_option('--ssh', help="SSH client and additional flags", default=SSH) parser.add_option('--host', help="target host name or IP address", default=HOST) parser.add_option('--user', help="login user on target", default=USER) parser.add_option('--destdir', help="work directory on target", default='~') (options, args) = parser.parse_args(argv) if len(args) < 1: parser.error("please supply test program to run") progpath = args[0] # assume that a test that was not compiled fails (e.g. 
getconf) if progpath[0] != '/': return (1, "") progdir = os.path.dirname(progpath) prog = os.path.basename(progpath) targ_progdir = os.path.basename(progdir) xfer_files( options.ssh, progdir, options.host, options.user, options.destdir) (rc, out) = exec_remote(options.ssh, options.host, options.user, options.destdir, targ_progdir, prog, args[1:]) return (rc, out) if __name__ == '__main__': (rc, out) = main(sys.argv[1:]) sys.stdout.write(out) sys.exit(rc) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/scripts/Makefile.waf0000660000000000000000000000176700000000000021131 0ustar00rootroot00000000000000# simple makefile wrapper to run waf WAF_BINARY=BUILDTOOLS/bin/waf WAF=WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) help: @echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH $(WAF) --help testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: $(WAF) dist distcheck: $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags bin/%:: FORCE $(WAF) --targets=$@ FORCE: configure: autogen-waf.sh BUILDTOOLS/scripts/configure.waf ./autogen-waf.sh Makefile: autogen-waf.sh configure BUILDTOOLS/scripts/Makefile.waf ./autogen-waf.sh ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/scripts/abi_gen.sh0000770000000000000000000000075600000000000020634 0ustar00rootroot00000000000000#!/bin/sh # generate a set of ABI signatures from a shared library SHAREDLIB="$1" GDBSCRIPT="gdb_syms.$$" ( cat < $GDBSCRIPT # forcing the terminal avoids a problem on Fedora12 TERM=none gdb -n -batch -x $GDBSCRIPT "$SHAREDLIB" < /dev/null rm -f $GDBSCRIPT ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/scripts/autogen-waf.sh0000770000000000000000000000135200000000000021456 0ustar00rootroot00000000000000#!/bin/sh p=`dirname $0` echo "Setting up for waf build" echo "Looking for the buildtools directory" d="buildtools" while test \! -d "$p/$d"; do d="../$d"; done echo "Found buildtools in $p/$d" echo "Setting up configure" rm -f $p/configure $p/include/config*.h* sed "s|BUILDTOOLS|$d|g;s|BUILDPATH|$p|g" < "$p/$d/scripts/configure.waf" > $p/configure chmod +x $p/configure echo "Setting up Makefile" rm -f $p/makefile $p/Makefile sed "s|BUILDTOOLS|$d|g" < "$p/$d/scripts/Makefile.waf" > $p/Makefile echo "done. Now run $p/configure or $p/configure.developer then make." if [ $p != "." ]; then echo "Notice: The build invoke path is not 'source4'! Use make with the parameter" echo "-C <'source4' path>. 
Example: make -C source4 all" fi ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/scripts/configure.waf0000770000000000000000000000037100000000000021365 0ustar00rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` WAF=BUILDTOOLS/bin/waf # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd BUILDPATH || exit 1 $WAF configure "$@" || exit 1 cd $PREVPATH ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/testwaf.sh0000770000000000000000000000260000000000000017224 0ustar00rootroot00000000000000#!/bin/bash set -e set -x d=$(dirname $0) cd $d/.. PREFIX=$HOME/testprefix if [ $# -gt 0 ]; then tests="$*" else tests="lib/replace lib/talloc lib/tevent lib/tdb lib/ldb" fi echo "testing in dirs $tests" for d in $tests; do echo "`date`: testing $d" pushd $d rm -rf bin type waf waf dist ./configure -C --enable-developer --prefix=$PREFIX time make make install make distcheck case $d in "lib/ldb") ldd bin/ldbadd ;; "lib/replace") ldd bin/replace_testsuite ;; "lib/talloc") ldd bin/talloc_testsuite ;; "lib/tdb") ldd bin/tdbtool ;; esac popd done echo "testing python portability" pushd lib/talloc versions="python2.4 python2.5 python2.6 python3.0 python3.1" for p in $versions; do ret=$(which $p || echo "failed") if [ $ret = "failed" ]; then echo "$p not found, skipping" continue fi echo "Testing $p" $p ../../buildtools/bin/waf configure -C --enable-developer --prefix=$PREFIX $p ../../buildtools/bin/waf build install done popd echo "testing cross compiling" pushd lib/talloc ret=$(which arm-linux-gnueabi-gcc || echo "failed") if [ $ret != "failed" ]; then CC=arm-linux-gnueabi-gcc ./configure -C --prefix=$PREFIX --cross-compile --cross-execute='runarm' make && make install else echo "Cross-compiler not installed, skipping test" fi popd ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/README0000660000000000000000000000037700000000000017663 0ustar00rootroot00000000000000This is a set of waf 'tools' to help make building the Samba components easier, by having common functions in one place. 
This gives us a more consistent build, and ensures that our project rules are obeyed TODO: see http://wiki.samba.org/index.php/Waf ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/__init__.py0000660000000000000000000000000000000000000021073 0ustar00rootroot00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/configure_file.py0000660000000000000000000000233100000000000022325 0ustar00rootroot00000000000000# handle substitution of variables in .in files import sys import re import os from waflib import Build, Logs from samba_utils import SUBST_VARS_RECURSIVE def subst_at_vars(task): '''substiture @VAR@ style variables in a file''' env = task.env s = task.inputs[0].read() # split on the vars a = re.split('(@\w+@)', s) out = [] for v in a: if re.match('@\w+@', v): vname = v[1:-1] if not vname in task.env and vname.upper() in task.env: vname = vname.upper() if not vname in task.env: Logs.error("Unknown substitution %s in %s" % (v, task.name)) sys.exit(1) v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) out.append(v) contents = ''.join(out) task.outputs[0].write(contents) return 0 def CONFIGURE_FILE(bld, in_file, **kwargs): '''configure file''' base=os.path.basename(in_file) t = bld.SAMBA_GENERATOR('INFILE_%s' % base, rule = subst_at_vars, source = in_file + '.in', target = in_file, vars = kwargs) Build.BuildContext.CONFIGURE_FILE = CONFIGURE_FILE ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/generic_cc.py0000660000000000000000000000324400000000000021432 0ustar00rootroot00000000000000 # compiler definition for a generic C compiler # based on suncc.py from waf import os, optparse from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf # # Let waflib provide useful defaults, but # provide generic_cc as last resort fallback on # all platforms # from waflib.Tools.compiler_c import c_compiler for key in c_compiler.keys(): c_compiler[key].append('generic_cc') @conf def find_generic_cc(conf): v = conf.env cc = None if v.CC: cc = v.CC elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('generic_cc was not found') try: conf.cmd_and_log(cc + ['--version']) except Errors.WafError: conf.fatal('%r --version could not be executed' % cc) v.CC = cc v.CC_NAME = 'generic_cc' @conf def generic_cc_common_flags(conf): v = conf.env v.CC_SRC_F = '' v.CC_TGT_F = ['-c', '-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o'] v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.cprogram_PATTERN = '%s' v.cshlib_PATTERN = 'lib%s.so' v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_generic_cc() conf.find_ar() conf.generic_cc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/pkgconfig.py0000660000000000000000000000457600000000000021331 0ustar00rootroot00000000000000# handle substitution of variables in pc files import os, re, sys from waflib import Build, Logs from 
samba_utils import SUBST_VARS_RECURSIVE, TO_LIST def subst_at_vars(task): '''substiture @VAR@ style variables in a file''' s = task.inputs[0].read() # split on the vars a = re.split('(@\w+@)', s) out = [] done_var = {} back_sub = [ ('PREFIX', '${prefix}'), ('EXEC_PREFIX', '${exec_prefix}')] for v in a: if re.match('@\w+@', v): vname = v[1:-1] if not vname in task.env and vname.upper() in task.env: vname = vname.upper() if not vname in task.env: Logs.error("Unknown substitution %s in %s" % (v, task.name)) sys.exit(1) v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) # now we back substitute the allowed pc vars for (b, m) in back_sub: s = task.env[b] if s == v[0:len(s)]: if not b in done_var: # we don't want to substitute the first usage done_var[b] = True else: v = m + v[len(s):] break out.append(v) contents = ''.join(out) task.outputs[0].write(contents) return 0 def PKG_CONFIG_FILES(bld, pc_files, vnum=None, extra_name=None): '''install some pkg_config pc files''' dest = '${PKGCONFIGDIR}' dest = bld.EXPAND_VARIABLES(dest) for f in TO_LIST(pc_files): if extra_name: target = f.split('.pc')[0] + extra_name + ".pc" else: target = f base=os.path.basename(target) t = bld.SAMBA_GENERATOR('PKGCONFIG_%s' % base, rule=subst_at_vars, source=f+'.in', target=target) bld.add_manual_dependency(bld.path.find_or_declare(f), bld.env['PREFIX'].encode('utf8')) t.vars = [] if t.env.RPATH_ON_INSTALL: t.env.LIB_RPATH = t.env.RPATH_ST % t.env.LIBDIR else: t.env.LIB_RPATH = '' if vnum: t.env.PACKAGE_VERSION = vnum for v in [ 'PREFIX', 'EXEC_PREFIX', 'LIB_RPATH' ]: t.vars.append(t.env[v]) bld.INSTALL_FILES(dest, target, flat=True, destname=base) Build.BuildContext.PKG_CONFIG_FILES = PKG_CONFIG_FILES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3941045 tevent-0.11.0/buildtools/wafsamba/samba3.py0000660000000000000000000001001300000000000020507 0ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import os from waflib import Build from samba_utils import TO_LIST from samba_autoconf import library_flags def SAMBA3_IS_STATIC_MODULE(bld, module): '''Check whether module is in static list''' if module in bld.env['static_modules']: return True return False Build.BuildContext.SAMBA3_IS_STATIC_MODULE = SAMBA3_IS_STATIC_MODULE def SAMBA3_IS_SHARED_MODULE(bld, module): '''Check whether module is in shared list''' if module in bld.env['shared_modules']: return True return False Build.BuildContext.SAMBA3_IS_SHARED_MODULE = SAMBA3_IS_SHARED_MODULE def SAMBA3_IS_ENABLED_MODULE(bld, module): '''Check whether module is in either shared or static list ''' return SAMBA3_IS_STATIC_MODULE(bld, module) or SAMBA3_IS_SHARED_MODULE(bld, module) Build.BuildContext.SAMBA3_IS_ENABLED_MODULE = SAMBA3_IS_ENABLED_MODULE def s3_fix_kwargs(bld, kwargs): '''fix the build arguments for s3 build rules to include the necessary includes, subdir and cflags options ''' s3dir = os.path.join(bld.env.srcdir, 'source3') s3reldir = os.path.relpath(s3dir, bld.path.abspath()) # the extra_includes list is relative to the source3 directory extra_includes = [ '.', 'include', 'lib' ] # local heimdal paths only included when USING_SYSTEM_KRB5 is not set if not bld.CONFIG_SET("USING_SYSTEM_KRB5"): extra_includes += [ '../source4/heimdal/lib/com_err', '../source4/heimdal/lib/krb5', '../source4/heimdal/lib/gssapi', '../source4/heimdal/lib/gssapi/gssapi', '../source4/heimdal_build/include', 
'../bin/default/source4/heimdal/lib/asn1' ] if bld.CONFIG_SET('USING_SYSTEM_TDB'): (tdb_includes, tdb_ldflags, tdb_cpppath) = library_flags(bld, 'tdb') extra_includes += tdb_cpppath else: extra_includes += [ '../lib/tdb/include' ] if bld.CONFIG_SET('USING_SYSTEM_TEVENT'): (tevent_includes, tevent_ldflags, tevent_cpppath) = library_flags(bld, 'tevent') extra_includes += tevent_cpppath else: extra_includes += [ '../lib/tevent' ] if bld.CONFIG_SET('USING_SYSTEM_TALLOC'): (talloc_includes, talloc_ldflags, talloc_cpppath) = library_flags(bld, 'talloc') extra_includes += talloc_cpppath else: extra_includes += [ '../lib/talloc' ] if bld.CONFIG_SET('USING_SYSTEM_POPT'): (popt_includes, popt_ldflags, popt_cpppath) = library_flags(bld, 'popt') extra_includes += popt_cpppath else: extra_includes += [ '../lib/popt' ] # s3 builds assume that they will have a bunch of extra include paths includes = [] for d in extra_includes: includes += [ os.path.join(s3reldir, d) ] # the rule may already have some includes listed if 'includes' in kwargs: includes += TO_LIST(kwargs['includes']) kwargs['includes'] = includes # these wrappers allow for mixing of S3 and S4 build rules in the one build def SAMBA3_LIBRARY(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_LIBRARY(name, *args, **kwargs) Build.BuildContext.SAMBA3_LIBRARY = SAMBA3_LIBRARY def SAMBA3_MODULE(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_MODULE(name, *args, **kwargs) Build.BuildContext.SAMBA3_MODULE = SAMBA3_MODULE def SAMBA3_SUBSYSTEM(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_SUBSYSTEM(name, *args, **kwargs) Build.BuildContext.SAMBA3_SUBSYSTEM = SAMBA3_SUBSYSTEM def SAMBA3_BINARY(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_BINARY(name, *args, **kwargs) Build.BuildContext.SAMBA3_BINARY = SAMBA3_BINARY def SAMBA3_PYTHON(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_PYTHON(name, *args, **kwargs) Build.BuildContext.SAMBA3_PYTHON = SAMBA3_PYTHON ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3941045 tevent-0.11.0/buildtools/wafsamba/samba_abi.py0000660000000000000000000002151700000000000021252 0ustar00rootroot00000000000000# functions for handling ABI checking of libraries import os import sys import re import fnmatch from waflib import Options, Utils, Logs, Task, Build, Errors from waflib.TaskGen import feature, before, after from wafsamba import samba_utils # these type maps cope with platform specific names for common types # please add new type mappings into the list below abi_type_maps = { '_Bool' : 'bool', 'struct __va_list_tag *' : 'va_list' } version_key = lambda x: list(map(int, x.split("."))) def normalise_signature(sig): '''normalise a signature from gdb''' sig = sig.strip() sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig) sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig) sig = re.sub('^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig) sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig) sig = re.sub('", ', r'\1"', sig) for t in abi_type_maps: # we need to cope with non-word characters in mapped types m = t m = m.replace('*', '\*') if m[-1].isalnum() or m[-1] == '_': m += '\\b' if m[0].isalnum() or m[0] == '_': m = '\\b' + m sig = re.sub(m, abi_type_maps[t], sig) return sig def normalise_varargs(sig): '''cope with older versions of gdb''' sig = re.sub(',\s\.\.\.', '', sig) return sig def parse_sigs(sigs, abi_match): '''parse 
ABI signatures file''' abi_match = samba_utils.TO_LIST(abi_match) ret = {} a = sigs.split('\n') for s in a: if s.find(':') == -1: continue sa = s.split(':') if abi_match: matched = False negative = False for p in abi_match: if p[0] == '!' and fnmatch.fnmatch(sa[0], p[1:]): negative = True break elif fnmatch.fnmatch(sa[0], p): matched = True break if (not matched) and negative: continue Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1]))) ret[sa[0]] = normalise_signature(sa[1]) return ret def save_sigs(sig_file, parsed_sigs): '''save ABI signatures to a file''' sigs = "".join('%s: %s\n' % (s, parsed_sigs[s]) for s in sorted(parsed_sigs.keys())) return samba_utils.save_file(sig_file, sigs, create_dir=True) def abi_check_task(self): '''check if the ABI has changed''' abi_gen = self.ABI_GEN libpath = self.inputs[0].abspath(self.env) libname = os.path.basename(libpath) sigs = samba_utils.get_string(Utils.cmd_output([abi_gen, libpath])) parsed_sigs = parse_sigs(sigs, self.ABI_MATCH) sig_file = self.ABI_FILE old_sigs = samba_utils.load_file(sig_file) if old_sigs is None or Options.options.ABI_UPDATE: if not save_sigs(sig_file, parsed_sigs): raise Errors.WafError('Failed to save ABI file "%s"' % sig_file) Logs.warn('Generated ABI signatures %s' % sig_file) return parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH) # check all old sigs got_error = False for s in parsed_old_sigs: if not s in parsed_sigs: Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % ( libname, s, parsed_old_sigs[s])) got_error = True elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]): Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % ( libname, s, parsed_old_sigs[s], parsed_sigs[s])) got_error = True for s in parsed_sigs: if not s in parsed_old_sigs: Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % ( libname, s, parsed_sigs[s])) got_error = True if got_error: raise Errors.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname) t = Task.task_factory('abi_check', abi_check_task, color='BLUE', ext_in='.bin') t.quiet = True # allow "waf --abi-check" to force re-checking the ABI if '--abi-check' in sys.argv: t.always_run = True @after('apply_link') @feature('abi_check') def abi_check(self): '''check that ABI matches saved signatures''' env = self.bld.env if not env.ABI_CHECK or self.abi_directory is None: return # if the platform doesn't support -fvisibility=hidden then the ABI # checks become fairly meaningless if not env.HAVE_VISIBILITY_ATTR: return topsrc = self.bld.srcnode.abspath() abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh') abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.version_libname, self.vnum) tsk = self.create_task('abi_check', self.link_task.outputs[0]) tsk.ABI_FILE = abi_file tsk.ABI_MATCH = self.abi_match tsk.ABI_GEN = abi_gen def abi_process_file(fname, version, symmap): '''process one ABI file, adding new symbols to the symmap''' for line in Utils.readf(fname).splitlines(): symname = line.split(":")[0] if not symname in symmap: symmap[symname] = version def abi_write_vscript(f, libname, current_version, versions, symmap, 
abi_match): """Write a vscript file for a library in --version-script format. :param f: File-like object to write to :param libname: Name of the library, uppercased :param current_version: Current version :param versions: Versions to consider :param symmap: Dictionary mapping symbols -> version :param abi_match: List of symbols considered to be public in the current version """ invmap = {} for s in symmap: invmap.setdefault(symmap[s], []).append(s) last_key = "" versions = sorted(versions, key=version_key) for k in versions: symver = "%s_%s" % (libname, k) if symver == current_version: break f.write("%s {\n" % symver) if k in sorted(invmap.keys()): f.write("\tglobal:\n") for s in invmap.get(k, []): f.write("\t\t%s;\n" % s); f.write("}%s;\n\n" % last_key) last_key = " %s" % symver f.write("%s {\n" % current_version) local_abi = list(filter(lambda x: x[0] == '!', abi_match)) global_abi = list(filter(lambda x: x[0] != '!', abi_match)) f.write("\tglobal:\n") if len(global_abi) > 0: for x in global_abi: f.write("\t\t%s;\n" % x) else: f.write("\t\t*;\n") # Always hide symbols that must be local if exist local_abi.extend(["!_end", "!__bss_start", "!_edata"]) f.write("\tlocal:\n") for x in local_abi: f.write("\t\t%s;\n" % x[1:]) if global_abi != ["*"]: if len(global_abi) > 0: f.write("\t\t*;\n") f.write("};\n") def abi_build_vscript(task): '''generate a vscript file for our public libraries''' tgt = task.outputs[0].bldpath(task.env) symmap = {} versions = [] for f in task.inputs: fname = f.abspath(task.env) basename = os.path.basename(fname) version = basename[len(task.env.LIBNAME)+1:-len(".sigs")] versions.append(version) abi_process_file(fname, version, symmap) f = open(tgt, mode='w') try: abi_write_vscript(f, task.env.LIBNAME, task.env.VERSION, versions, symmap, task.env.ABI_MATCH) finally: f.close() def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None): '''generate a vscript file for our public libraries''' if abi_directory: source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname), flat=True) def abi_file_key(path): return version_key(path[:-len(".sigs")].rsplit("-")[-1]) source = sorted(source.split(), key=abi_file_key) else: source = '' libname = os.path.basename(libname) version = os.path.basename(version) libname = libname.replace("-", "_").replace("+","_").upper() version = version.replace("-", "_").replace("+","_").upper() t = bld.SAMBA_GENERATOR(vscript, rule=abi_build_vscript, source=source, group='vscripts', target=vscript) if abi_match is None: abi_match = ["*"] else: abi_match = samba_utils.TO_LIST(abi_match) t.env.ABI_MATCH = abi_match t.env.VERSION = version t.env.LIBNAME = libname t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH'] Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/samba_autoconf.py0000660000000000000000000010131300000000000022326 0ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section import os, sys from waflib import Build, Options, Logs, Context from waflib.Configure import conf from waflib.TaskGen import feature from waflib.Tools import c_preproc as preproc from samba_utils import TO_LIST, GET_TARGET_TYPE, SET_TARGET_TYPE, unique_list, mkdir_p missing_headers = set() #################################################### # some autoconf like helpers, to make the transition # to waf a bit easier for those used to autoconf # m4 files @conf def 
DEFINE(conf, d, v, add_to_cflags=False, quote=False): '''define a config option''' conf.define(d, v, quote=quote) if add_to_cflags: conf.env.append_value('CFLAGS', '-D%s=%s' % (d, str(v))) def hlist_to_string(conf, headers=None): '''convert a headers list to a set of #include lines''' hlist = conf.env.hlist if headers: hlist = hlist[:] hlist.extend(TO_LIST(headers)) hdrs = "\n".join('#include <%s>' % h for h in hlist) return hdrs @conf def COMPOUND_START(conf, msg): '''start a compound test''' def null_check_message_1(self,*k,**kw): return def null_check_message_2(self,*k,**kw): return v = getattr(conf.env, 'in_compound', []) if v != [] and v != 0: conf.env.in_compound = v + 1 return conf.start_msg(msg) conf.saved_check_message_1 = conf.start_msg conf.start_msg = null_check_message_1 conf.saved_check_message_2 = conf.end_msg conf.end_msg = null_check_message_2 conf.env.in_compound = 1 @conf def COMPOUND_END(conf, result): '''end a compound test''' conf.env.in_compound -= 1 if conf.env.in_compound != 0: return conf.start_msg = conf.saved_check_message_1 conf.end_msg = conf.saved_check_message_2 p = conf.end_msg if result is True: p('ok') elif not result: p('not found', 'YELLOW') else: p(result) @feature('nolink') def nolink(self): '''using the nolink type in conf.check() allows us to avoid the link stage of a test, thus speeding it up for tests where linking is not needed''' pass def CHECK_HEADER(conf, h, add_headers=False, lib=None): '''check for a header''' if h in missing_headers and lib is None: return False d = h.upper().replace('/', '_') d = d.replace('.', '_') d = d.replace('-', '_') d = 'HAVE_%s' % d if CONFIG_SET(conf, d): if add_headers: if not h in conf.env.hlist: conf.env.hlist.append(h) return True (ccflags, ldflags, cpppath) = library_flags(conf, lib) hdrs = hlist_to_string(conf, headers=h) if lib is None: lib = "" ret = conf.check(fragment='%s\nint main(void) { return 0; }\n' % hdrs, type='nolink', execute=0, cflags=ccflags, mandatory=False, includes=cpppath, uselib=lib.upper(), msg="Checking for header %s" % h) if not ret: missing_headers.add(h) return False conf.DEFINE(d, 1) if add_headers and not h in conf.env.hlist: conf.env.hlist.append(h) return ret @conf def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None): '''check for a list of headers when together==True, then the headers accumulate within this test. 
This is useful for interdependent headers ''' ret = True if not add_headers and together: saved_hlist = conf.env.hlist[:] set_add_headers = True else: set_add_headers = add_headers for hdr in TO_LIST(headers): if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib): ret = False if not add_headers and together: conf.env.hlist = saved_hlist return ret def header_list(conf, headers=None, lib=None): '''form a list of headers which exist, as a string''' hlist=[] if headers is not None: for h in TO_LIST(headers): if CHECK_HEADER(conf, h, add_headers=False, lib=lib): hlist.append(h) return hlist_to_string(conf, headers=hlist) @conf def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None): '''check for a single type''' if define is None: define = 'HAVE_' + t.upper().replace(' ', '_') if msg is None: msg='Checking for %s' % t ret = CHECK_CODE(conf, '%s _x' % t, define, execute=False, headers=headers, local_include=False, msg=msg, lib=lib, link=False) if not ret and alternate: conf.DEFINE(t, alternate) return ret @conf def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None): '''check for a list of types''' ret = True for t in TO_LIST(list): if not CHECK_TYPE(conf, t, headers=headers, define=define, alternate=alternate, lib=lib): ret = False return ret @conf def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None): '''check for a single type with a header''' return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define) @conf def CHECK_VARIABLE(conf, v, define=None, always=False, headers=None, msg=None, lib=None): '''check for a variable declaration (or define)''' if define is None: define = 'HAVE_%s' % v.upper() if msg is None: msg="Checking for variable %s" % v return CHECK_CODE(conf, # we need to make sure the compiler doesn't # optimize it out... 
''' #ifndef %s void *_x; _x=(void *)&%s; return (int)_x; #endif return 0 ''' % (v, v), execute=False, link=False, msg=msg, local_include=False, lib=lib, headers=headers, define=define, always=always) @conf def CHECK_DECLS(conf, vars, reverse=False, headers=None, always=False): '''check a list of variable declarations, using the HAVE_DECL_xxx form of define When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx ''' ret = True for v in TO_LIST(vars): if not reverse: define='HAVE_DECL_%s' % v.upper() else: define='HAVE_%s_DECL' % v.upper() if not CHECK_VARIABLE(conf, v, define=define, headers=headers, msg='Checking for declaration of %s' % v, always=always): if not CHECK_CODE(conf, ''' return (int)%s; ''' % (v), execute=False, link=False, msg='Checking for declaration of %s (as enum)' % v, local_include=False, headers=headers, define=define, always=always): ret = False return ret def CHECK_FUNC(conf, f, link=True, lib=None, headers=None): '''check for a function''' define='HAVE_%s' % f.upper() ret = False in_lib_str = "" if lib: in_lib_str = " in %s" % lib conf.COMPOUND_START('Checking for %s%s' % (f, in_lib_str)) if link is None or link: ret = CHECK_CODE(conf, # this is based on the autoconf strategy ''' #define %s __fake__%s #ifdef HAVE_LIMITS_H # include #else # include #endif #undef %s #if defined __stub_%s || defined __stub___%s #error "bad glibc stub" #endif extern char %s(); int main() { return %s(); } ''' % (f, f, f, f, f, f, f), execute=False, link=True, addmain=False, add_headers=False, define=define, local_include=False, lib=lib, headers=headers, msg='Checking for %s' % f) if not ret: ret = CHECK_CODE(conf, # it might be a macro # we need to make sure the compiler doesn't # optimize it out... 'void *__x = (void *)%s; return (int)__x' % f, execute=False, link=True, addmain=True, add_headers=True, define=define, local_include=False, lib=lib, headers=headers, msg='Checking for macro %s' % f) if not ret and (link is None or not link): ret = CHECK_VARIABLE(conf, f, define=define, headers=headers, msg='Checking for declaration of %s' % f) conf.COMPOUND_END(ret) return ret @conf def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None): '''check for a list of functions''' ret = True for f in TO_LIST(list): if not CHECK_FUNC(conf, f, link=link, lib=lib, headers=headers): ret = False return ret @conf def CHECK_SIZEOF(conf, vars, headers=None, define=None, critical=True): '''check the size of a type''' for v in TO_LIST(vars): v_define = define ret = False if v_define is None: v_define = 'SIZEOF_%s' % v.upper().replace(' ', '_') for size in list((1, 2, 4, 8, 16, 32, 64)): if CHECK_CODE(conf, 'static int test_array[1 - 2 * !(((long int)(sizeof(%s))) <= %d)];' % (v, size), define=v_define, quote=False, headers=headers, local_include=False, msg="Checking if size of %s == %d" % (v, size)): conf.DEFINE(v_define, size) ret = True break if not ret and critical: Logs.error("Couldn't determine size of '%s'" % v) sys.exit(1) return ret @conf def CHECK_VALUEOF(conf, v, headers=None, define=None): '''check the value of a variable/define''' ret = True v_define = define if v_define is None: v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_') if CHECK_CODE(conf, 'printf("%%u", (unsigned)(%s))' % v, define=v_define, execute=True, define_ret=True, quote=False, headers=headers, local_include=False, msg="Checking value of %s" % v): return int(conf.env[v_define]) return None @conf def CHECK_CODE(conf, code, define, always=False, execute=False, addmain=True, add_headers=True, mandatory=False, 
headers=None, msg=None, cflags='', includes='# .', local_include=True, lib=None, link=True, define_ret=False, quote=False, on_target=True, strict=False): '''check if some code compiles and/or runs''' if CONFIG_SET(conf, define): return True if headers is not None: CHECK_HEADERS(conf, headers=headers, lib=lib) if add_headers: hdrs = header_list(conf, headers=headers, lib=lib) else: hdrs = '' if execute: execute = 1 else: execute = 0 if addmain: fragment='%s\n int main(void) { %s; return 0; }\n' % (hdrs, code) else: fragment='%s\n%s\n' % (hdrs, code) if msg is None: msg="Checking for %s" % define cflags = TO_LIST(cflags) # Be strict when relying on a compiler check # Some compilers (e.g. xlc) ignore non-supported features as warnings if strict: if 'WERROR_CFLAGS' in conf.env: cflags.extend(conf.env['WERROR_CFLAGS']) if local_include: cflags.append('-I%s' % conf.path.abspath()) if not link: type='nolink' else: type='cprogram' uselib = TO_LIST(lib) (ccflags, ldflags, cpppath) = library_flags(conf, uselib) includes = TO_LIST(includes) includes.extend(cpppath) uselib = [l.upper() for l in uselib] cflags.extend(ccflags) if on_target: test_args = conf.SAMBA_CROSS_ARGS(msg=msg) else: test_args = [] conf.COMPOUND_START(msg) try: ret = conf.check(fragment=fragment, execute=execute, define_name = define, cflags=cflags, ldflags=ldflags, includes=includes, uselib=uselib, type=type, msg=msg, quote=quote, test_args=test_args, define_ret=define_ret) except Exception: if always: conf.DEFINE(define, 0) else: conf.undefine(define) conf.COMPOUND_END(False) if mandatory: raise return False else: # Success is indicated by ret but we should unset # defines set by WAF's c_config.check() because it # defines it to int(ret) and we want to undefine it if not ret: conf.undefine(define) conf.COMPOUND_END(False) return False if not define_ret: conf.DEFINE(define, 1) conf.COMPOUND_END(True) else: conf.DEFINE(define, ret, quote=quote) conf.COMPOUND_END(ret) return True @conf def CHECK_STRUCTURE_MEMBER(conf, structname, member, always=False, define=None, headers=None, lib=None): '''check for a structure member''' if define is None: define = 'HAVE_%s' % member.upper() return CHECK_CODE(conf, '%s s; void *_x; _x=(void *)&s.%s' % (structname, member), define, execute=False, link=False, lib=lib, always=always, headers=headers, local_include=False, msg="Checking for member %s in %s" % (member, structname)) @conf def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n', mandatory=False): '''check if the given cflags are accepted by the compiler ''' check_cflags = TO_LIST(cflags) if 'WERROR_CFLAGS' in conf.env: check_cflags.extend(conf.env['WERROR_CFLAGS']) return conf.check(fragment=fragment, execute=0, mandatory=mandatory, type='nolink', cflags=check_cflags, msg="Checking compiler accepts %s" % cflags) @conf def CHECK_LDFLAGS(conf, ldflags, mandatory=False): '''check if the given ldflags are accepted by the linker ''' return conf.check(fragment='int main(void) { return 0; }\n', execute=0, ldflags=ldflags, mandatory=mandatory, msg="Checking linker accepts %s" % ldflags) @conf def CONFIG_GET(conf, option): '''return True if a configuration option was found''' if (option in conf.env): return conf.env[option] else: return None @conf def CONFIG_SET(conf, option): '''return True if a configuration option was found''' if option not in conf.env: return False v = conf.env[option] if v is None: return False if v == []: return False if v == (): return False return True @conf def CONFIG_RESET(conf, option): if option not in 
conf.env: return del conf.env[option] Build.BuildContext.CONFIG_RESET = CONFIG_RESET Build.BuildContext.CONFIG_SET = CONFIG_SET Build.BuildContext.CONFIG_GET = CONFIG_GET def library_flags(self, libs): '''work out flags from pkg_config''' ccflags = [] ldflags = [] cpppath = [] for lib in TO_LIST(libs): # note that we do not add the -I and -L in here, as that is added by the waf # core. Adding it here would just change the order that it is put on the link line # which can cause system paths to be added before internal libraries extra_ccflags = TO_LIST(getattr(self.env, 'CFLAGS_%s' % lib.upper(), [])) extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), [])) extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), [])) ccflags.extend(extra_ccflags) ldflags.extend(extra_ldflags) cpppath.extend(extra_cpppath) extra_cpppath = TO_LIST(getattr(self.env, 'INCLUDES_%s' % lib.upper(), [])) cpppath.extend(extra_cpppath) if 'EXTRA_LDFLAGS' in self.env: ldflags.extend(self.env['EXTRA_LDFLAGS']) ccflags = unique_list(ccflags) ldflags = unique_list(ldflags) cpppath = unique_list(cpppath) return (ccflags, ldflags, cpppath) @conf def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False): '''check if a set of libraries exist as system libraries returns the sublist of libs that do exist as a syslib or [] ''' fragment= ''' int foo() { int v = 2; return v*2; } ''' ret = [] liblist = TO_LIST(libs) for lib in liblist[:]: if GET_TARGET_TYPE(conf, lib) == 'SYSLIB': ret.append(lib) continue (ccflags, ldflags, cpppath) = library_flags(conf, lib) if shlib: res = conf.check(features='c cshlib', fragment=fragment, lib=lib, uselib_store=lib, cflags=ccflags, ldflags=ldflags, uselib=lib.upper(), mandatory=False) else: res = conf.check(lib=lib, uselib_store=lib, cflags=ccflags, ldflags=ldflags, uselib=lib.upper(), mandatory=False) if not res: if mandatory: Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) sys.exit(1) if empty_decl: # if it isn't a mandatory library, then remove it from dependency lists if set_target: SET_TARGET_TYPE(conf, lib, 'EMPTY') else: conf.define('HAVE_LIB%s' % lib.upper().replace('-','_').replace('.','_'), 1) conf.env['LIB_' + lib.upper()] = lib if set_target: conf.SET_TARGET_TYPE(lib, 'SYSLIB') ret.append(lib) return ret @conf def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False, headers=None, link=True, empty_decl=True, set_target=True): """ check that the functions in 'list' are available in 'library' if they are, then make that library available as a dependency if the library is not available and mandatory==True, then raise an error. 
If the library is not available and mandatory==False, then add the library to the list of dependencies to remove from build rules optionally check for the functions first in libc """ remaining = TO_LIST(list) liblist = TO_LIST(library) # check if some already found for f in remaining[:]: if CONFIG_SET(conf, 'HAVE_%s' % f.upper()): remaining.remove(f) # see if the functions are in libc if checklibc: for f in remaining[:]: if CHECK_FUNC(conf, f, link=True, headers=headers): remaining.remove(f) if remaining == []: for lib in liblist: if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl: SET_TARGET_TYPE(conf, lib, 'EMPTY') return True checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target) for lib in liblist[:]: if not lib in checklist and mandatory: Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) sys.exit(1) ret = True for f in remaining: if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link): ret = False return ret @conf def IN_LAUNCH_DIR(conf): '''return True if this rule is being run from the launch directory''' return os.path.realpath(conf.path.abspath()) == os.path.realpath(Context.launch_dir) Options.OptionsContext.IN_LAUNCH_DIR = IN_LAUNCH_DIR @conf def SAMBA_CONFIG_H(conf, path=None): '''write out config.h in the right directory''' # we don't want to produce a config.h in places like lib/replace # when we are building projects that depend on lib/replace if not IN_LAUNCH_DIR(conf): return # we need to build real code that can't be optimized away to test stack_protect_list = ['-fstack-protector-strong', '-fstack-protector'] for stack_protect_flag in stack_protect_list: flag_supported = conf.check(fragment=''' #include int main(void) { char t[100000]; while (fgets(t, sizeof(t), stdin)); return 0; } ''', execute=0, cflags=[ '-Werror', '-Wp,-D_FORTIFY_SOURCE=2', stack_protect_flag], mandatory=False, msg='Checking if compiler accepts %s' % (stack_protect_flag)) if flag_supported: conf.ADD_CFLAGS('%s' % (stack_protect_flag)) break flag_supported = conf.check(fragment=''' #include int main(void) { char t[100000]; while (fgets(t, sizeof(t), stdin)); return 0; } ''', execute=0, cflags=[ '-Werror', '-fstack-clash-protection'], mandatory=False, msg='Checking if compiler accepts -fstack-clash-protection') if flag_supported: conf.ADD_CFLAGS('-fstack-clash-protection') if Options.options.debug: conf.ADD_CFLAGS('-g', testflags=True) if Options.options.pidl_developer: conf.env.PIDL_DEVELOPER_MODE = True if Options.options.developer: conf.env.DEVELOPER_MODE = True conf.ADD_CFLAGS('-g', testflags=True) conf.ADD_CFLAGS('-Wall', testflags=True) conf.ADD_CFLAGS('-Wshadow', testflags=True) conf.ADD_CFLAGS('-Wmissing-prototypes', testflags=True) if CHECK_CODE(conf, 'struct a { int b; }; struct c { struct a d; } e = { };', 'CHECK_C99_INIT', link=False, cflags='-Wmissing-field-initializers -Werror=missing-field-initializers', msg="Checking C99 init of nested structs."): conf.ADD_CFLAGS('-Wmissing-field-initializers', testflags=True) conf.ADD_CFLAGS('-Wformat-overflow=2', testflags=True) conf.ADD_CFLAGS('-Wformat-zero-length', testflags=True) conf.ADD_CFLAGS('-Wcast-align -Wcast-qual', testflags=True) conf.ADD_CFLAGS('-fno-common', testflags=True) conf.ADD_CFLAGS('-Werror=address', testflags=True) # we add these here to ensure that -Wstrict-prototypes is not set during configure conf.ADD_CFLAGS('-Werror=strict-prototypes -Wstrict-prototypes', testflags=True) conf.ADD_CFLAGS('-Werror=write-strings -Wwrite-strings', 
testflags=True) conf.ADD_CFLAGS('-Werror-implicit-function-declaration', testflags=True) conf.ADD_CFLAGS('-Werror=pointer-arith -Wpointer-arith', testflags=True) conf.ADD_CFLAGS('-Werror=declaration-after-statement -Wdeclaration-after-statement', testflags=True) conf.ADD_CFLAGS('-Werror=return-type -Wreturn-type', testflags=True) conf.ADD_CFLAGS('-Werror=uninitialized -Wuninitialized', testflags=True) conf.ADD_CFLAGS('-Wimplicit-fallthrough', testflags=True) conf.ADD_CFLAGS('-Werror=strict-overflow -Wstrict-overflow=2', testflags=True) conf.ADD_CFLAGS('-Wformat=2 -Wno-format-y2k', testflags=True) conf.ADD_CFLAGS('-Wno-format-zero-length', testflags=True) conf.ADD_CFLAGS('-Werror=format-security -Wformat-security', testflags=True, prereq_flags='-Wformat') # This check is because for ldb_search(), a NULL format string # is not an error, but some compilers complain about that. if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], ''' int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2))); int main(void) { testformat(0); return 0; } '''): if not 'EXTRA_CFLAGS' in conf.env: conf.env['EXTRA_CFLAGS'] = [] conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format")) if not Options.options.disable_warnings_as_errors: conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Werror -Wno-error=deprecated-declarations', testflags=True) conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Wno-error=tautological-compare', testflags=True) conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Wno-error=cast-align', testflags=True) if Options.options.fatal_errors: conf.ADD_CFLAGS('-Wfatal-errors', testflags=True) if Options.options.pedantic: conf.ADD_CFLAGS('-W', testflags=True) if (Options.options.address_sanitizer or Options.options.undefined_sanitizer): conf.ADD_CFLAGS('-g -O1', testflags=True) if Options.options.address_sanitizer: conf.ADD_CFLAGS('-fno-omit-frame-pointer', testflags=True) conf.ADD_CFLAGS('-fsanitize=address', testflags=True) conf.ADD_LDFLAGS('-fsanitize=address', testflags=True) conf.env['ADDRESS_SANITIZER'] = True if Options.options.undefined_sanitizer: conf.ADD_CFLAGS('-fsanitize=undefined', testflags=True) conf.ADD_CFLAGS('-fsanitize=null', testflags=True) conf.ADD_CFLAGS('-fsanitize=alignment', testflags=True) conf.ADD_LDFLAGS('-fsanitize=undefined', testflags=True) conf.env['UNDEFINED_SANITIZER'] = True # Let people pass an additional ADDITIONAL_{CFLAGS,LDFLAGS} # environment variables which are only used the for final build. # # The CFLAGS and LDFLAGS environment variables are also # used for the configure checks which might impact their results. 
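# Illustrative only (the flag values below are hypothetical, not taken from
# this file): these variables are meant to be passed on the configure command
# line, for example
#
#   ADDITIONAL_CFLAGS='-O2 -pipe' ADDITIONAL_LDFLAGS='-Wl,--as-needed' ./configure
#
# and, unlike CFLAGS/LDFLAGS, they are validated with CHECK_CFLAGS/CHECK_LDFLAGS
# below before being appended to EXTRA_CFLAGS/EXTRA_LDFLAGS for the final build.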
# # If these variables don't pass a smoke test, fail the configure conf.add_os_flags('ADDITIONAL_CFLAGS') if conf.env.ADDITIONAL_CFLAGS: conf.CHECK_CFLAGS(conf.env['ADDITIONAL_CFLAGS'], mandatory=True) conf.env['EXTRA_CFLAGS'].extend(conf.env['ADDITIONAL_CFLAGS']) conf.add_os_flags('ADDITIONAL_LDFLAGS') if conf.env.ADDITIONAL_LDFLAGS: conf.CHECK_LDFLAGS(conf.env['ADDITIONAL_LDFLAGS'], mandatory=True) conf.env['EXTRA_LDFLAGS'].extend(conf.env['ADDITIONAL_LDFLAGS']) if path is None: conf.write_config_header('default/config.h', top=True, remove=False) else: conf.write_config_header(os.path.join(conf.variant, path), remove=False) for key in conf.env.define_key: conf.undefine(key, from_env=False) conf.env.define_key = [] conf.SAMBA_CROSS_CHECK_COMPLETE() @conf def CONFIG_PATH(conf, name, default): '''setup a configurable path''' if not name in conf.env: if default[0] == '/': conf.env[name] = default else: conf.env[name] = conf.env['PREFIX'] + default @conf def ADD_NAMED_CFLAGS(conf, name, flags, testflags=False, prereq_flags=[]): '''add some CFLAGS to the command line optionally set testflags to ensure all the flags work ''' prereq_flags = TO_LIST(prereq_flags) if testflags: ok_flags=[] for f in flags.split(): if CHECK_CFLAGS(conf, [f] + prereq_flags): ok_flags.append(f) flags = ok_flags if not name in conf.env: conf.env[name] = [] conf.env[name].extend(TO_LIST(flags)) @conf def ADD_CFLAGS(conf, flags, testflags=False, prereq_flags=[]): '''add some CFLAGS to the command line optionally set testflags to ensure all the flags work ''' ADD_NAMED_CFLAGS(conf, 'EXTRA_CFLAGS', flags, testflags=testflags, prereq_flags=prereq_flags) @conf def ADD_LDFLAGS(conf, flags, testflags=False): '''add some LDFLAGS to the command line optionally set testflags to ensure all the flags work this will return the flags that are added, if any ''' if testflags: ok_flags=[] for f in flags.split(): if CHECK_LDFLAGS(conf, f): ok_flags.append(f) flags = ok_flags if not 'EXTRA_LDFLAGS' in conf.env: conf.env['EXTRA_LDFLAGS'] = [] conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags)) return flags @conf def ADD_EXTRA_INCLUDES(conf, includes): '''add some extra include directories to all builds''' if not 'EXTRA_INCLUDES' in conf.env: conf.env['EXTRA_INCLUDES'] = [] conf.env['EXTRA_INCLUDES'].extend(TO_LIST(includes)) def CURRENT_CFLAGS(bld, target, cflags, allow_warnings=False, use_hostcc=False, hide_symbols=False): '''work out the current flags. local flags are added first''' ret = [] if use_hostcc: ret += ['-D_SAMBA_HOSTCC_'] ret += TO_LIST(cflags) if not 'EXTRA_CFLAGS' in bld.env: list = [] else: list = bld.env['EXTRA_CFLAGS']; ret.extend(list) if not allow_warnings and 'PICKY_CFLAGS' in bld.env: list = bld.env['PICKY_CFLAGS']; ret.extend(list) if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR: ret.append(bld.env.VISIBILITY_CFLAGS) return ret @conf def CHECK_CC_ENV(conf): """trim whitespaces from 'CC'. The build farm sometimes puts a space at the start""" if os.environ.get('CC'): conf.env.CC = TO_LIST(os.environ.get('CC')) @conf def SETUP_CONFIGURE_CACHE(conf, enable): '''enable/disable cache of configure results''' if enable: # when -C is chosen, we will use a private cache and will # not look into system includes. 
This roughtly matches what # autoconf does with -C cache_path = os.path.join(conf.bldnode.abspath(), '.confcache') mkdir_p(cache_path) Options.cache_global = os.environ['WAFCACHE'] = cache_path else: # when -C is not chosen we will not cache configure checks # We set the recursion limit low to prevent waf from spending # a lot of time on the signatures of the files. Options.cache_global = os.environ['WAFCACHE'] = '' preproc.recursion_limit = 1 # in either case we don't need to scan system includes preproc.go_absolute = False @conf def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf): if Options.options.address_sanitizer or Options.options.enable_libfuzzer: # Sanitizers can rely on symbols undefined at library link time and the # symbols used for fuzzers are only defined by compiler wrappers. return if not sys.platform.startswith("openbsd"): # we don't want any libraries or modules to rely on runtime # resolution of symbols conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True) if (conf.env.undefined_ignore_ldflags == [] and conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup'])): conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_autoproto.py0000660000000000000000000000150500000000000022546 0ustar00rootroot00000000000000# waf build tool for building automatic prototypes from C source import os from waflib import Build from samba_utils import SET_TARGET_TYPE def SAMBA_AUTOPROTO(bld, header, source): '''rule for samba prototype generation''' bld.SET_BUILD_GROUP('prototypes') relpath = os.path.relpath(bld.path.abspath(), bld.srcnode.abspath()) name = os.path.join(relpath, header) SET_TARGET_TYPE(bld, name, 'PROTOTYPE') t = bld( name = name, source = source, target = header, update_outputs=True, ext_out='.c', before ='c', rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=. --public=/dev/null --private="${TGT}" ${SRC}' ) t.env.SCRIPT = os.path.join(bld.srcnode.abspath(), 'source4/script') Build.BuildContext.SAMBA_AUTOPROTO = SAMBA_AUTOPROTO ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_bundled.py0000660000000000000000000002257300000000000022137 0ustar00rootroot00000000000000# functions to support bundled libraries import sys from waflib import Build, Options, Logs from waflib.Configure import conf from wafsamba import samba_utils def PRIVATE_NAME(bld, name, private_extension, private_library): '''possibly rename a library to include a bundled extension''' if not private_library: return name # we now use the same private name for libraries as the public name. # see http://git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib for a # demonstration that this is the right thing to do # also see http://lists.samba.org/archive/samba-technical/2011-January/075816.html if private_extension: return name extension = bld.env.PRIVATE_EXTENSION if extension and name.startswith('%s' % extension): return name if extension and name.endswith('%s' % extension): return name return "%s-%s" % (name, extension) def target_in_list(target, lst, default): for l in lst: if target == l: return True if '!' 
+ target == l: return False if l == 'ALL': return True if l == 'NONE': return False return default def BUILTIN_LIBRARY(bld, name): '''return True if a library should be builtin instead of being built as a shared lib''' return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False) Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY def BUILTIN_DEFAULT(opt, builtins): '''set a comma separated default list of builtin libraries for this package''' if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options.__dict__: return Options.options.__dict__['BUILTIN_LIBRARIES_DEFAULT'] = builtins Options.OptionsContext.BUILTIN_DEFAULT = BUILTIN_DEFAULT def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''): '''set a default private library extension''' if 'PRIVATE_EXTENSION_DEFAULT' in Options.options.__dict__: return Options.options.__dict__['PRIVATE_EXTENSION_DEFAULT'] = extension Options.options.__dict__['PRIVATE_EXTENSION_EXCEPTION'] = noextension Options.OptionsContext.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT def minimum_library_version(conf, libname, default): '''allow override of mininum system library version''' minlist = Options.options.MINIMUM_LIBRARY_VERSION if not minlist: return default for m in minlist.split(','): a = m.split(':') if len(a) != 2: Logs.error("Bad syntax for --minimum-library-version of %s" % m) sys.exit(1) if a[0] == libname: return a[1] return default @conf def LIB_MAY_BE_BUNDLED(conf, libname): if libname in conf.env.SYSTEM_LIBS: return False if libname in conf.env.BUNDLED_LIBS: return True if '!%s' % libname in conf.env.BUNDLED_LIBS: return False if 'NONE' in conf.env.BUNDLED_LIBS: return False return True @conf def LIB_MUST_BE_BUNDLED(conf, libname): if libname in conf.env.BUNDLED_LIBS: return True if '!%s' % libname in conf.env.BUNDLED_LIBS: return False if 'ALL' in conf.env.BUNDLED_LIBS: return True return False @conf def LIB_MUST_BE_PRIVATE(conf, libname): return ('ALL' in conf.env.PRIVATE_LIBS or libname in conf.env.PRIVATE_LIBS) @conf def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0', maxversion=None, version_blacklist=[], onlyif=None, implied_deps=None, pkg=None): '''check if a library is available as a system library. This only tries using pkg-config ''' return conf.CHECK_BUNDLED_SYSTEM(libname, minversion=minversion, maxversion=maxversion, version_blacklist=version_blacklist, onlyif=onlyif, implied_deps=implied_deps, pkg=pkg) @conf def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0', maxversion=None, version_blacklist=[], checkfunctions=None, headers=None, checkcode=None, onlyif=None, implied_deps=None, require_headers=True, pkg=None, set_target=True): '''check if a library is available as a system library. this first tries via pkg-config, then if that fails tries by testing for a specified function in the specified lib ''' # We always do a logic validation of 'onlyif' first missing = [] if onlyif: for l in samba_utils.TO_LIST(onlyif): f = 'FOUND_SYSTEMLIB_%s' % l if not f in conf.env: Logs.error('ERROR: CHECK_BUNDLED_SYSTEM(%s) - ' % (libname) + 'missing prerequisite check for ' + 'system library %s, onlyif=%r' % (l, onlyif)) sys.exit(1) if not conf.env[f]: missing.append(l) found = 'FOUND_SYSTEMLIB_%s' % libname if found in conf.env: return conf.env[found] if conf.LIB_MUST_BE_BUNDLED(libname): conf.env[found] = False return False # see if the library should only use a system version if another dependent # system version is found. 
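# A rough usage sketch (the library name and version here are hypothetical):
# a wscript would typically call
#
#   conf.CHECK_BUNDLED_SYSTEM_PKG('talloc', minversion='2.1.0')
#
# and use the boolean result to decide between the system library and the
# bundled copy (for example to drive a flag such as USING_SYSTEM_TALLOC, as
# consumed by the s3 wscript glue earlier in this archive).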
That prevents possible use of mixed library # versions if missing: if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing)) sys.exit(1) conf.env[found] = False return False def check_functions_headers_code(): '''helper function for CHECK_BUNDLED_SYSTEM''' if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname): return False if checkfunctions is not None: ok = conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers, empty_decl=False, set_target=False) if not ok: return False if checkcode is not None: define='CHECK_BUNDLED_SYSTEM_%s' % libname.upper() ok = conf.CHECK_CODE(checkcode, lib=libname, headers=headers, local_include=False, msg=msg, define=define) conf.CONFIG_RESET(define) if not ok: return False return True minversion = minimum_library_version(conf, libname, minversion) msg = 'Checking for system %s' % libname msg_ver = [] if minversion != '0.0.0': msg_ver.append('>=%s' % minversion) if maxversion is not None: msg_ver.append('<=%s' % maxversion) for v in version_blacklist: msg_ver.append('!=%s' % v) if msg_ver != []: msg += " (%s)" % (" ".join(msg_ver)) uselib_store=libname.upper() if pkg is None: pkg = libname version_checks = '%s >= %s' % (pkg, minversion) if maxversion is not None: version_checks += ' %s <= %s' % (pkg, maxversion) version_checks += "".join(' %s != %s' % (pkg, v) for v in version_blacklist) # try pkgconfig first if (conf.CHECK_CFG(package=pkg, args='"%s" --cflags --libs' % (version_checks), msg=msg, uselib_store=uselib_store) and check_functions_headers_code()): if set_target: conf.SET_TARGET_TYPE(libname, 'SYSLIB') conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) return True if checkfunctions is not None: if check_functions_headers_code(): conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) if set_target: conf.SET_TARGET_TYPE(libname, 'SYSLIB') return True conf.env[found] = False if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return False def tuplize_version(version): return tuple([int(x) for x in version.split(".")]) @conf def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'): '''check if a python module is available on the system and has the specified minimum version. ''' if conf.LIB_MUST_BE_BUNDLED(libname): return False # see if the library should only use a system version if another dependent # system version is found. 
That prevents possible use of mixed library # versions minversion = minimum_library_version(conf, libname, minversion) try: m = __import__(modulename) except ImportError: found = False else: try: version = m.__version__ except AttributeError: found = False else: found = tuplize_version(version) >= tuplize_version(minversion) if not found and not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return found def NONSHARED_BINARY(bld, name): '''return True if a binary should be built without non-system shared libs''' return target_in_list(name, bld.env.NONSHARED_BINARIES, False) Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/samba_conftests.py0000660000000000000000000004025200000000000022524 0ustar00rootroot00000000000000# a set of config tests that use the samba_autoconf functions # to test for commonly needed configuration options import os, shutil, re from waflib import Build, Configure, Utils, Options, Logs, Errors from waflib.Configure import conf from samba_utils import TO_LIST, ADD_LD_LIBRARY_PATH, get_string def add_option(self, *k, **kw): '''syntax help: provide the "match" attribute to opt.add_option() so that folders can be added to specific config tests''' Options.OptionsContext.parser = self match = kw.get('match', []) if match: del kw['match'] opt = self.parser.add_option(*k, **kw) opt.match = match return opt Options.OptionsContext.add_option = add_option @conf def check(self, *k, **kw): '''Override the waf defaults to inject --with-directory options''' if not 'env' in kw: kw['env'] = self.env.derive() # match the configuration test with specific options, for example: # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" additional_dirs = [] if 'msg' in kw: msg = kw['msg'] for x in Options.OptionsContext.parser.parser.option_list: if getattr(x, 'match', None) and msg in x.match: d = getattr(Options.options, x.dest, '') if d: additional_dirs.append(d) # we add the additional dirs twice: once for the test data, and again if the compilation test suceeds below def add_options_dir(dirs, env): for x in dirs: if not x in env.CPPPATH: env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH if not x in env.LIBPATH: env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH add_options_dir(additional_dirs, kw['env']) self.validate_c(kw) self.start_msg(kw['msg']) ret = None try: ret = self.run_c_code(*k, **kw) except Configure.ConfigurationError as e: self.end_msg(kw['errmsg'], 'YELLOW') if 'mandatory' in kw and kw['mandatory']: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: kw['success'] = ret self.end_msg(self.ret_msg(kw['okmsg'], kw)) # success! 
keep the CPPPATH/LIBPATH add_options_dir(additional_dirs, self.env) self.post_check(*k, **kw) if not kw.get('execute', False): return ret == 0 return ret @conf def CHECK_ICONV(conf, define='HAVE_NATIVE_ICONV'): '''check if the iconv library is installed optionally pass a define''' if conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=True, headers='iconv.h'): conf.DEFINE(define, 1) return True return False @conf def CHECK_LARGEFILE(conf, define='HAVE_LARGEFILE'): '''see what we need for largefile support''' getconf_cflags = conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS']); if getconf_cflags is not False: if (conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define='WORKING_GETCONF_LFS_CFLAGS', execute=True, cflags=getconf_cflags, msg='Checking getconf large file support flags work')): conf.ADD_CFLAGS(getconf_cflags) getconf_cflags_list=TO_LIST(getconf_cflags) for flag in getconf_cflags_list: if flag[:2] == "-D": flag_split = flag[2:].split('=') if len(flag_split) == 1: conf.DEFINE(flag_split[0], '1') else: conf.DEFINE(flag_split[0], flag_split[1]) if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define, execute=True, msg='Checking for large file support without additional flags'): return True if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define, execute=True, cflags='-D_FILE_OFFSET_BITS=64', msg='Checking for -D_FILE_OFFSET_BITS=64'): conf.DEFINE('_FILE_OFFSET_BITS', 64) return True if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define, execute=True, cflags='-D_LARGE_FILES', msg='Checking for -D_LARGE_FILES'): conf.DEFINE('_LARGE_FILES', 1) return True return False @conf def CHECK_C_PROTOTYPE(conf, function, prototype, define, headers=None, msg=None): '''verify that a C prototype matches the one on the current system''' if not conf.CHECK_DECLS(function, headers=headers): return False if not msg: msg = 'Checking C prototype for %s' % function return conf.CHECK_CODE('%s; void *_x = (void *)%s' % (prototype, function), define=define, local_include=False, headers=headers, link=False, execute=False, msg=msg) @conf def CHECK_CHARSET_EXISTS(conf, charset, outcharset='UCS-2LE', headers=None, define=None): '''check that a named charset is able to be used with iconv_open() for conversion to a target charset ''' msg = 'Checking if can we convert from %s to %s' % (charset, outcharset) if define is None: define = 'HAVE_CHARSET_%s' % charset.upper().replace('-','_') return conf.CHECK_CODE(''' iconv_t cd = iconv_open("%s", "%s"); if (cd == 0 || cd == (iconv_t)-1) return -1; ''' % (charset, outcharset), define=define, execute=True, msg=msg, lib='iconv', headers=headers) def find_config_dir(conf): '''find a directory to run tests in''' k = 0 while k < 10000: dir = os.path.join(conf.bldnode.abspath(), '.conf_check_%d' % k) try: shutil.rmtree(dir) except OSError: pass try: os.stat(dir) except: break k += 1 try: os.makedirs(dir) except: conf.fatal('cannot create a configuration test folder %r' % dir) try: os.stat(dir) except: conf.fatal('cannot use the configuration test folder %r' % dir) return dir @conf def CHECK_SHLIB_INTRASINC_NAME_FLAGS(conf, msg): ''' check if the waf default flags for setting the name of lib are ok ''' snip = ''' int foo(int v) { return v * 2; } ''' return conf.check(features='c cshlib',vnum="1",fragment=snip,msg=msg, mandatory=False) @conf def CHECK_NEED_LC(conf, msg): '''check if we need -lc''' dir = find_config_dir(conf) env = conf.env bdir = os.path.join(dir, 'testbuild2') if not os.path.exists(bdir): os.makedirs(bdir) subdir = os.path.join(dir, "liblctest") 
os.makedirs(subdir) Utils.writef(os.path.join(subdir, 'liblc1.c'), '#include \nint lib_func(void) { FILE *f = fopen("foo", "r");}\n') bld = Build.BuildContext() bld.log = conf.log bld.all_envs.update(conf.all_envs) bld.all_envs['default'] = env bld.lst_variants = bld.all_envs.keys() bld.load_dirs(dir, bdir) bld.rescan(bld.srcnode) bld(features='c cshlib', source='liblctest/liblc1.c', ldflags=conf.env['EXTRA_LDFLAGS'], target='liblc', name='liblc') try: bld.compile() conf.check_message(msg, '', True) return True except: conf.check_message(msg, '', False) return False @conf def CHECK_SHLIB_W_PYTHON(conf, msg): '''check if we need -undefined dynamic_lookup''' dir = find_config_dir(conf) snip = ''' #include #include #define environ (*_NSGetEnviron()) static PyObject *ldb_module = NULL; int foo(int v) { extern char **environ; environ[0] = 1; ldb_module = PyImport_ImportModule("ldb"); return v * 2; } ''' return conf.check(features='c cshlib',uselib='PYEMBED',fragment=snip,msg=msg, mandatory=False) # this one is quite complex, and should probably be broken up # into several parts. I'd quite like to create a set of CHECK_COMPOUND() # functions that make writing complex compound tests like this much easier @conf def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None): '''see if the platform supports building libraries''' if msg is None: if rpath: msg = "rpath library support" else: msg = "building library support" dir = find_config_dir(conf) bdir = os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) env = conf.env subdir = os.path.join(dir, "libdir") os.makedirs(subdir) Utils.writef(os.path.join(subdir, 'lib1.c'), 'int lib_func(void) { return 42; }\n') Utils.writef(os.path.join(dir, 'main.c'), 'int lib_func(void);\n' 'int main(void) {return !(lib_func() == 42);}\n') bld = Build.BuildContext() bld.log = conf.log bld.all_envs.update(conf.all_envs) bld.all_envs['default'] = env bld.lst_variants = bld.all_envs.keys() bld.load_dirs(dir, bdir) bld.rescan(bld.srcnode) ldflags = [] if version_script: ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath()) Utils.writef(os.path.join(dir,'vscript'), 'TEST_1.0A2 { global: *; };\n') bld(features='c cshlib', source='libdir/lib1.c', target='libdir/lib1', ldflags=ldflags, name='lib1') o = bld(features='c cprogram', source='main.c', target='prog1', uselib_local='lib1') if rpath: o.rpath=os.path.join(bdir, 'default/libdir') # compile the program try: bld.compile() except: conf.check_message(msg, '', False) return False # path for execution lastprog = o.link_task.outputs[0].abspath(env) if not rpath: if 'LD_LIBRARY_PATH' in os.environ: old_ld_library_path = os.environ['LD_LIBRARY_PATH'] else: old_ld_library_path = None ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir')) # we need to run the program, try to get its result args = conf.SAMBA_CROSS_ARGS(msg=msg) proc = Utils.subprocess.Popen([lastprog] + args, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE) (out, err) = proc.communicate() w = conf.log.write w(str(out)) w('\n') w(str(err)) w('\nreturncode %r\n' % proc.returncode) ret = (proc.returncode == 0) if not rpath: os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or '' conf.check_message(msg, '', ret) return ret @conf def CHECK_PERL_MANPAGE(conf, msg=None, section=None): '''work out what extension perl uses for manpages''' if msg is None: if section: msg = "perl man%s extension" % section else: msg = "perl manpage generation" conf.start_msg(msg) dir = find_config_dir(conf) bdir = 
os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) Utils.writef(os.path.join(bdir, 'Makefile.PL'), """ use ExtUtils::MakeMaker; WriteMakefile( 'NAME' => 'WafTest', 'EXE_FILES' => [ 'WafTest' ] ); """) back = os.path.abspath('.') os.chdir(bdir) proc = Utils.subprocess.Popen(['perl', 'Makefile.PL'], stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE) (out, err) = proc.communicate() os.chdir(back) ret = (proc.returncode == 0) if not ret: conf.end_msg('not found', color='YELLOW') return if section: man = Utils.readf(os.path.join(bdir,'Makefile')) m = re.search('MAN%sEXT\s+=\s+(\w+)' % section, man) if not m: conf.end_msg('not found', color='YELLOW') return ext = m.group(1) conf.end_msg(ext) return ext conf.end_msg('ok') return True @conf def CHECK_COMMAND(conf, cmd, msg=None, define=None, on_target=True, boolean=False): '''run a command and return result''' if msg is None: msg = 'Checking %s' % ' '.join(cmd) conf.COMPOUND_START(msg) cmd = cmd[:] if on_target: cmd.extend(conf.SAMBA_CROSS_ARGS(msg=msg)) try: ret = get_string(Utils.cmd_output(cmd)) except: conf.COMPOUND_END(False) return False if boolean: conf.COMPOUND_END('ok') if define: conf.DEFINE(define, '1') else: ret = ret.strip() conf.COMPOUND_END(ret) if define: conf.DEFINE(define, ret, quote=True) return ret @conf def CHECK_UNAME(conf): '''setup SYSTEM_UNAME_* defines''' ret = True for v in "sysname machine release version".split(): if not conf.CHECK_CODE(''' int printf(const char *format, ...); struct utsname n; if (uname(&n) == -1) return -1; printf("%%s", n.%s); ''' % v, define='SYSTEM_UNAME_%s' % v.upper(), execute=True, define_ret=True, quote=True, headers='sys/utsname.h', local_include=False, msg="Checking uname %s type" % v): ret = False return ret @conf def CHECK_INLINE(conf): '''check for the right value for inline''' conf.COMPOUND_START('Checking for inline') for i in ['inline', '__inline__', '__inline']: ret = conf.CHECK_CODE(''' typedef int foo_t; static %s foo_t static_foo () {return 0; } %s foo_t foo () {return 0; }\n''' % (i, i), define='INLINE_MACRO', addmain=False, link=False) if ret: if i != 'inline': conf.DEFINE('inline', i, quote=False) break if not ret: conf.COMPOUND_END(ret) else: conf.COMPOUND_END(i) return ret @conf def CHECK_XSLTPROC_MANPAGES(conf): '''check if xsltproc can run with the given stylesheets''' if not conf.CONFIG_SET('XSLTPROC'): conf.find_program('xsltproc', var='XSLTPROC') if not conf.CONFIG_SET('XSLTPROC'): return False s='http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' conf.CHECK_COMMAND('%s --nonet %s 2> /dev/null' % (conf.env.get_flat('XSLTPROC'), s), msg='Checking for stylesheet %s' % s, define='XSLTPROC_MANPAGES', on_target=False, boolean=True) if not conf.CONFIG_SET('XSLTPROC_MANPAGES'): print("A local copy of the docbook.xsl wasn't found on your system" \ " consider installing package like docbook-xsl") # # Determine the standard libpath for the used compiler, # so we can later use that to filter out these standard # library paths when some tools like cups-config or # python-config report standard lib paths with their # ldflags (-L...) 
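# As a rough illustration (the paths are hypothetical and vary by toolchain),
# the check below parses `cc -print-search-dirs` output of the form
#
#   install: /usr/lib/gcc/x86_64-linux-gnu/12/
#   programs: =/usr/lib/gcc/x86_64-linux-gnu/12/:...
#   libraries: =/usr/lib/gcc/x86_64-linux-gnu/12/:/usr/lib/x86_64-linux-gnu/:/usr/lib/
#
# keeping only the normalised "libraries: =" entries in conf.env.STANDARD_LIBPATH.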
# @conf def CHECK_STANDARD_LIBPATH(conf): # at least gcc and clang support this: try: cmd = conf.env.CC + ['-print-search-dirs'] out = get_string(Utils.cmd_output(cmd)).split('\n') except ValueError: # option not supported by compiler - use a standard list of directories dirlist = [ '/usr/lib', '/usr/lib64' ] except: raise Errors.WafError('Unexpected error running "%s"' % (cmd)) else: dirlist = [] for line in out: line = line.strip() if line.startswith("libraries: ="): dirliststr = line[len("libraries: ="):] dirlist = [ os.path.normpath(x) for x in dirliststr.split(':') ] break conf.env.STANDARD_LIBPATH = dirlist ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.0334148 tevent-0.11.0/buildtools/wafsamba/samba_cross.py0000660000000000000000000001362200000000000021646 0ustar00rootroot00000000000000# functions for handling cross-compilation import os, sys, re, shlex from waflib import Utils, Logs, Options, Errors, Context from waflib.Configure import conf from wafsamba import samba_utils real_Popen = None ANSWER_UNKNOWN = (254, "") ANSWER_NO = (1, "") ANSWER_OK = (0, "") cross_answers_incomplete = False def add_answer(ca_file, msg, answer): '''add an answer to a set of cross answers''' try: f = open(ca_file, 'a') except: Logs.error("Unable to open cross-answers file %s" % ca_file) sys.exit(1) (retcode, retstring) = answer # if retstring is more than one line then we probably # don't care about its actual content (the tests should # yield one-line output in order to comply with the cross-answer # format) retstring = retstring.strip() if len(retstring.split('\n')) > 1: retstring = '' answer = (retcode, retstring) if answer == ANSWER_OK: f.write('%s: OK\n' % msg) elif answer == ANSWER_UNKNOWN: f.write('%s: UNKNOWN\n' % msg) elif answer == ANSWER_NO: f.write('%s: NO\n' % msg) else: if retcode == 0: f.write('%s: "%s"\n' % (msg, retstring)) else: f.write('%s: (%d, "%s")\n' % (msg, retcode, retstring)) f.close() def cross_answer(ca_file, msg): '''return a (retcode,retstring) tuple from a answers file''' try: f = open(ca_file, 'r') except: return ANSWER_UNKNOWN for line in f: line = line.strip() if line == '' or line[0] == '#': continue if line.find(':') != -1: a = line.split(':', 1) thismsg = a[0].strip() if thismsg != msg: continue ans = a[1].strip() if ans == "OK" or ans == "YES": f.close() return ANSWER_OK elif ans == "UNKNOWN": f.close() return ANSWER_UNKNOWN elif ans == "FAIL" or ans == "NO": f.close() return ANSWER_NO elif ans[0] == '"': f.close() return (0, ans.strip('"')) elif ans[0] == "'": f.close() return (0, ans.strip("'")) else: m = re.match('\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans) if m: f.close() return (int(m.group(1)), m.group(2)) else: raise Errors.WafError("Bad answer format '%s' in %s" % (line, ca_file)) f.close() return ANSWER_UNKNOWN class cross_Popen(Utils.subprocess.Popen): '''cross-compilation wrapper for Popen''' def __init__(*k, **kw): (obj, args) = k use_answers = False ans = ANSWER_UNKNOWN # Three possibilities: # 1. Only cross-answers - try the cross-answers file, and if # there's no corresponding answer, add to the file and mark # the configure process as unfinished. # 2. Only cross-execute - get the answer from cross-execute # 3. Both - try the cross-answers file, and if there is no # corresponding answer - use cross-execute to get an answer, # and add that answer to the file. 
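# For reference, a cross-answers file is plain text with one "msg: answer"
# pair per line, in the format accepted by cross_answer() above.  A minimal
# sketch (the entries themselves are only illustrative):
#
#   Checking uname sysname type: "Linux"
#   Checking for large file support without additional flags: OK
#   rpath library support: NO
#   Checking getconf LFS_CFLAGS: (0, "-D_FILE_OFFSET_BITS=64")
#
# OK/YES, NO/FAIL and UNKNOWN map to ANSWER_OK, ANSWER_NO and ANSWER_UNKNOWN;
# a quoted "string" is returned with exit code 0, and a (retcode, "output")
# tuple is returned as parsed.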
if '--cross-answers' in args: # when --cross-answers is set, then change the arguments # to use the cross answers if available use_answers = True i = args.index('--cross-answers') ca_file = args[i+1] msg = args[i+2] ans = cross_answer(ca_file, msg) if '--cross-execute' in args and ans == ANSWER_UNKNOWN: # when --cross-execute is set, then change the arguments # to use the cross emulator i = args.index('--cross-execute') newargs = shlex.split(args[i+1]) newargs.extend(args[0:i]) if use_answers: p = real_Popen(newargs, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE, env=kw.get('env', {})) ce_out, ce_err = p.communicate() ans = (p.returncode, samba_utils.get_string(ce_out)) add_answer(ca_file, msg, ans) else: args = newargs if use_answers: if ans == ANSWER_UNKNOWN: global cross_answers_incomplete cross_answers_incomplete = True add_answer(ca_file, msg, ans) (retcode, retstring) = ans args = ['/bin/sh', '-c', "echo -n '%s'; exit %d" % (retstring, retcode)] real_Popen.__init__(*(obj, args), **kw) @conf def SAMBA_CROSS_ARGS(conf, msg=None): '''get test_args to pass when running cross compiled binaries''' if not conf.env.CROSS_COMPILE: return [] global real_Popen if real_Popen is None: real_Popen = Utils.subprocess.Popen Utils.subprocess.Popen = cross_Popen Utils.run_process = Utils.run_regular_process Utils.get_process = Utils.alloc_process_pool = Utils.nada ret = [] if conf.env.CROSS_EXECUTE: ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE]) if conf.env.CROSS_ANSWERS: if msg is None: raise Errors.WafError("Cannot have NULL msg in cross-answers") ret.extend(['--cross-answers', os.path.join(Context.launch_dir, conf.env.CROSS_ANSWERS), msg]) if ret == []: raise Errors.WafError("Cannot cross-compile without either --cross-execute or --cross-answers") return ret @conf def SAMBA_CROSS_CHECK_COMPLETE(conf): '''check if we have some unanswered questions''' global cross_answers_incomplete if conf.env.CROSS_COMPILE and cross_answers_incomplete: raise Errors.WafError("Cross answers file %s is incomplete" % conf.env.CROSS_ANSWERS) return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_deps.py0000660000000000000000000012170600000000000021453 0ustar00rootroot00000000000000# Samba automatic dependency handling and project rules import os, sys, re from waflib import Build, Options, Logs, Utils, Errors from waflib.Logs import debug from waflib.Configure import conf from waflib import ConfigSet from samba_bundled import BUILTIN_LIBRARY from samba_utils import LOCAL_CACHE, TO_LIST, get_tgt_list, unique_list from samba_autoconf import library_flags @conf def ADD_GLOBAL_DEPENDENCY(ctx, dep): '''add a dependency for all binaries and libraries''' if not 'GLOBAL_DEPENDENCIES' in ctx.env: ctx.env.GLOBAL_DEPENDENCIES = [] ctx.env.GLOBAL_DEPENDENCIES.append(dep) @conf def BREAK_CIRCULAR_LIBRARY_DEPENDENCIES(ctx): '''indicate that circular dependencies between libraries should be broken.''' ctx.env.ALLOW_CIRCULAR_LIB_DEPENDENCIES = True @conf def SET_SYSLIB_DEPS(conf, target, deps): '''setup some implied dependencies for a SYSLIB''' cache = LOCAL_CACHE(conf, 'SYSLIB_DEPS') cache[target] = deps def expand_subsystem_deps(bld): '''expand the reverse dependencies resulting from subsystem attributes of modules. 
This is walking over the complete list of declared subsystems, and expands the samba_deps_extended list for any module<->subsystem dependencies''' subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for subsystem_name in subsystem_list: bld.ASSERT(subsystem_name in targets, "Subsystem target %s not declared" % subsystem_name) type = targets[subsystem_name] if type == 'DISABLED' or type == 'EMPTY': continue # for example, # subsystem_name = dcerpc_server (a subsystem) # subsystem = dcerpc_server (a subsystem object) # module_name = rpc_epmapper (a module within the dcerpc_server subsystem) # module = rpc_epmapper (a module object within the dcerpc_server subsystem) subsystem = bld.get_tgen_by_name(subsystem_name) bld.ASSERT(subsystem is not None, "Unable to find subsystem %s" % subsystem_name) for d in subsystem_list[subsystem_name]: module_name = d['TARGET'] module_type = targets[module_name] if module_type in ['DISABLED', 'EMPTY']: continue bld.ASSERT(subsystem is not None, "Subsystem target %s for %s (%s) not found" % (subsystem_name, module_name, module_type)) if module_type in ['SUBSYSTEM']: # if a module is a plain object type (not a library) then the # subsystem it is part of needs to have it as a dependency, so targets # that depend on this subsystem get the modules of that subsystem subsystem.samba_deps_extended.append(module_name) subsystem.samba_deps_extended = unique_list(subsystem.samba_deps_extended) def build_dependencies(self): '''This builds the dependency list for a target. It runs after all the targets are declared The reason this is not just done in the SAMBA_*() rules is that we have no way of knowing the full dependency list for a target until we have all of the targets declared. ''' if self.samba_type in ['LIBRARY', 'BINARY', 'PYTHON']: self.uselib = list(self.final_syslibs) self.uselib_local = list(self.final_libs) self.add_objects = list(self.final_objects) # extra link flags from pkg_config libs = self.final_syslibs.copy() (cflags, ldflags, cpppath) = library_flags(self, list(libs)) new_ldflags = getattr(self, 'samba_ldflags', [])[:] new_ldflags.extend(ldflags) self.ldflags = new_ldflags if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ldflags: for f in self.env.undefined_ldflags: self.ldflags.remove(f) if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ignore_ldflags: for f in self.env.undefined_ignore_ldflags: self.ldflags.append(f) debug('deps: computed dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', self.sname, self.uselib, self.uselib_local, self.add_objects) if self.samba_type in ['SUBSYSTEM']: # this is needed for the cflags of libs that come from pkg_config self.uselib = list(self.final_syslibs) self.uselib.extend(list(self.direct_syslibs)) for lib in self.final_libs: t = self.bld.get_tgen_by_name(lib) self.uselib.extend(list(t.final_syslibs)) self.uselib = unique_list(self.uselib) if getattr(self, 'uselib', None): up_list = [] for l in self.uselib: up_list.append(l.upper()) self.uselib = up_list def build_includes(self): '''This builds the right set of includes for a target. One tricky part of this is that the includes= attribute for a target needs to use paths which are relative to that targets declaration directory (which we can get at via t.path). The way this works is the includes list gets added as samba_includes in the main build task declaration. 
Then this function runs after all of the tasks are declared, and it processes the samba_includes attribute to produce a includes= attribute ''' if getattr(self, 'samba_includes', None) is None: return bld = self.bld inc_deps = includes_objects(bld, self, set(), {}) includes = [] # maybe add local includes if getattr(self, 'local_include', True) and getattr(self, 'local_include_first', True): includes.append('.') includes.extend(self.samba_includes_extended) if 'EXTRA_INCLUDES' in bld.env and getattr(self, 'global_include', True): includes.extend(bld.env['EXTRA_INCLUDES']) includes.append('#') inc_set = set() inc_abs = [] for d in inc_deps: t = bld.get_tgen_by_name(d) bld.ASSERT(t is not None, "Unable to find dependency %s for %s" % (d, self.sname)) inclist = getattr(t, 'samba_includes_extended', [])[:] if getattr(t, 'local_include', True): inclist.append('.') if inclist == []: continue tpath = t.samba_abspath for inc in inclist: npath = tpath + '/' + inc if not npath in inc_set: inc_abs.append(npath) inc_set.add(npath) mypath = self.path.abspath(bld.env) for inc in inc_abs: relpath = os.path.relpath(inc, mypath) includes.append(relpath) if getattr(self, 'local_include', True) and not getattr(self, 'local_include_first', True): includes.append('.') # now transform the includes list to be relative to the top directory # which is represented by '#' in waf. This allows waf to cache the # includes lists more efficiently includes_top = [] for i in includes: if i[0] == '#': # some are already top based includes_top.append(i) continue absinc = os.path.join(self.path.abspath(), i) relinc = os.path.relpath(absinc, self.bld.srcnode.abspath()) includes_top.append('#' + relinc) self.includes = unique_list(includes_top) debug('deps: includes for target %s: includes=%s', self.sname, self.includes) def add_init_functions(self): '''This builds the right set of init functions''' bld = self.bld subsystems = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') # cope with the separated object lists from BINARY and LIBRARY targets sname = self.sname if sname.endswith('.objlist'): sname = sname[0:-8] modules = [] if sname in subsystems: modules.append(sname) m = getattr(self, 'samba_modules', None) if m is not None: modules.extend(TO_LIST(m)) m = getattr(self, 'samba_subsystem', None) if m is not None: modules.append(m) if 'pyembed' in self.features: return sentinel = getattr(self, 'init_function_sentinel', 'NULL') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') cflags = getattr(self, 'samba_cflags', [])[:] if modules == []: sname = sname.replace('-','_') sname = sname.replace('.','_') sname = sname.replace('/','_') cflags.append('-DSTATIC_%s_MODULES=%s' % (sname, sentinel)) if sentinel == 'NULL': proto = "extern void __%s_dummy_module_proto(void)" % (sname) cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (sname, proto)) self.cflags = cflags return for m in modules: bld.ASSERT(m in subsystems, "No init_function defined for module '%s' in target '%s'" % (m, self.sname)) init_fn_list = [] for d in subsystems[m]: if targets[d['TARGET']] != 'DISABLED': init_fn_list.append(d['INIT_FUNCTION']) if init_fn_list == []: cflags.append('-DSTATIC_%s_MODULES=%s' % (m, sentinel)) if sentinel == 'NULL': proto = "extern void __%s_dummy_module_proto(void)" % (m) cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) else: cflags.append('-DSTATIC_%s_MODULES=%s' % (m, ','.join(init_fn_list) + ',' + sentinel)) proto = "".join('_MODULE_PROTO(%s)' % f for f in init_fn_list) +\ "extern void __%s_dummy_module_proto(void)" % (m) 
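        # Illustrative sketch (not from the original source; the module and init
        # function names are hypothetical): for m = 'auth' with
        # init_fn_list = ['auth_sam_init', 'auth_unix_init'] and the default
        # 'NULL' sentinel, the two defines produced in this branch come out
        # roughly as:
        #   -DSTATIC_auth_MODULES=auth_sam_init,auth_unix_init,NULL
        #   -DSTATIC_auth_MODULES_PROTO=_MODULE_PROTO(auth_sam_init)_MODULE_PROTO(auth_unix_init)extern void __auth_dummy_module_proto(void)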
cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) self.cflags = cflags def check_duplicate_sources(bld, tgt_list): '''see if we are compiling the same source file more than once''' debug('deps: checking for duplicate sources') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: source_list = TO_LIST(getattr(t, 'source', '')) tpath = os.path.normpath(os.path.relpath(t.path.abspath(bld.env), t.env.BUILD_DIRECTORY + '/default')) obj_sources = set() for s in source_list: if not isinstance(s, str): print('strange path in check_duplicate_sources %r' % s) s = s.abspath() p = os.path.normpath(os.path.join(tpath, s)) if p in obj_sources: Logs.error("ERROR: source %s appears twice in target '%s'" % (p, t.sname)) sys.exit(1) obj_sources.add(p) t.samba_source_set = obj_sources subsystems = {} # build a list of targets that each source file is part of for t in tgt_list: if not targets[t.sname] in [ 'LIBRARY', 'BINARY', 'PYTHON' ]: continue for obj in t.add_objects: t2 = t.bld.get_tgen_by_name(obj) source_set = getattr(t2, 'samba_source_set', set()) for s in source_set: if not s in subsystems: subsystems[s] = {} if not t.sname in subsystems[s]: subsystems[s][t.sname] = [] subsystems[s][t.sname].append(t2.sname) for s in subsystems: if len(subsystems[s]) > 1 and Options.options.SHOW_DUPLICATES: Logs.warn("WARNING: source %s is in more than one target: %s" % (s, subsystems[s].keys())) for tname in subsystems[s]: if len(subsystems[s][tname]) > 1: raise Errors.WafError("ERROR: source %s is in more than one subsystem of target '%s': %s" % (s, tname, subsystems[s][tname])) return True def check_group_ordering(bld, tgt_list): '''see if we have any dependencies that violate the group ordering It is an error for a target to depend on a target from a later build group ''' def group_name(g): tm = bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] for g in bld.task_manager.groups: gname = group_name(g) for t in g.tasks_gen: t.samba_group = gname grp_map = {} idx = 0 for g in bld.task_manager.groups: name = group_name(g) grp_map[name] = idx idx += 1 targets = LOCAL_CACHE(bld, 'TARGET_TYPE') ret = True for t in tgt_list: tdeps = getattr(t, 'add_objects', []) + getattr(t, 'uselib_local', []) for d in tdeps: t2 = bld.get_tgen_by_name(d) if t2 is None: continue map1 = grp_map[t.samba_group] map2 = grp_map[t2.samba_group] if map2 > map1: Logs.error("Target %r in build group %r depends on target %r from later build group %r" % ( t.sname, t.samba_group, t2.sname, t2.samba_group)) ret = False return ret Build.BuildContext.check_group_ordering = check_group_ordering def show_final_deps(bld, tgt_list): '''show the final dependencies for all targets''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: if not targets[t.sname] in ['LIBRARY', 'BINARY', 'PYTHON', 'SUBSYSTEM']: continue debug('deps: final dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', t.sname, t.uselib, getattr(t, 'uselib_local', []), getattr(t, 'add_objects', [])) def add_samba_attributes(bld, tgt_list): '''ensure a target has a the required samba attributes''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: if t.name != '': t.sname = t.name else: t.sname = t.target t.samba_type = targets[t.sname] t.samba_abspath = t.path.abspath(bld.env) t.samba_deps_extended = t.samba_deps[:] t.samba_includes_extended = TO_LIST(t.samba_includes)[:] t.cflags = getattr(t, 'samba_cflags', '') def replace_grouping_libraries(bld, tgt_list): '''replace dependencies based on 
grouping libraries If a library is marked as a grouping library, then any target that depends on a subsystem that is part of that grouping library gets that dependency replaced with a dependency on the grouping library ''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') grouping = {} # find our list of grouping libraries, mapped from the subsystems they depend on for t in tgt_list: if not getattr(t, 'grouping_library', False): continue for dep in t.samba_deps_extended: bld.ASSERT(dep in targets, "grouping library target %s not declared in %s" % (dep, t.sname)) if targets[dep] == 'SUBSYSTEM': grouping[dep] = t.sname # now replace any dependencies on elements of grouping libraries for t in tgt_list: for i in range(len(t.samba_deps_extended)): dep = t.samba_deps_extended[i] if dep in grouping: if t.sname != grouping[dep]: debug("deps: target %s: replacing dependency %s with grouping library %s" % (t.sname, dep, grouping[dep])) t.samba_deps_extended[i] = grouping[dep] def build_direct_deps(bld, tgt_list): '''build the direct_objects and direct_libs sets for each target''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') syslib_deps = LOCAL_CACHE(bld, 'SYSLIB_DEPS') global_deps = bld.env.GLOBAL_DEPENDENCIES global_deps_exclude = set() for dep in global_deps: t = bld.get_tgen_by_name(dep) for d in t.samba_deps: # prevent loops from the global dependencies list global_deps_exclude.add(d) global_deps_exclude.add(d + '.objlist') for t in tgt_list: t.direct_objects = set() t.direct_libs = set() t.direct_syslibs = set() deps = t.samba_deps_extended[:] if getattr(t, 'samba_use_global_deps', False) and not t.sname in global_deps_exclude: deps.extend(global_deps) for d in deps: if d == t.sname: continue if not d in targets: Logs.error("Unknown dependency '%s' in '%s'" % (d, t.sname)) sys.exit(1) if targets[d] in [ 'EMPTY', 'DISABLED' ]: continue if targets[d] == 'PYTHON' and targets[t.sname] != 'PYTHON' and t.sname.find('.objlist') == -1: # this check should be more restrictive, but for now we have pidl-generated python # code that directly depends on other python modules Logs.error('ERROR: Target %s has dependency on python module %s' % (t.sname, d)) sys.exit(1) if targets[d] == 'SYSLIB': t.direct_syslibs.add(d) if d in syslib_deps: for implied in TO_LIST(syslib_deps[d]): if BUILTIN_LIBRARY(bld, implied): t.direct_objects.add(implied) elif targets[implied] == 'SYSLIB': t.direct_syslibs.add(implied) elif targets[implied] in ['LIBRARY', 'MODULE']: t.direct_libs.add(implied) else: Logs.error('Implied dependency %s in %s is of type %s' % ( implied, t.sname, targets[implied])) sys.exit(1) continue t2 = bld.get_tgen_by_name(d) if t2 is None: Logs.error("no task %s of type %s in %s" % (d, targets[d], t.sname)) sys.exit(1) if t2.samba_type in [ 'LIBRARY', 'MODULE' ]: t.direct_libs.add(d) elif t2.samba_type in [ 'SUBSYSTEM', 'ASN1', 'PYTHON' ]: t.direct_objects.add(d) debug('deps: built direct dependencies') def dependency_loop(loops, t, target): '''add a dependency loop to the loops dictionary''' if t.sname == target: return if not target in loops: loops[target] = set() if not t.sname in loops[target]: loops[target].add(t.sname) def indirect_libs(bld, t, chain, loops): '''recursively calculate the indirect library dependencies for a target An indirect library is a library that results from a dependency on a subsystem ''' ret = getattr(t, 'indirect_libs', None) if ret is not None: return ret ret = set() for obj in t.direct_objects: if obj in chain: dependency_loop(loops, t, obj) continue chain.add(obj) t2 = 
bld.get_tgen_by_name(obj) r2 = indirect_libs(bld, t2, chain, loops) chain.remove(obj) ret = ret.union(t2.direct_libs) ret = ret.union(r2) for obj in indirect_objects(bld, t, set(), loops): if obj in chain: dependency_loop(loops, t, obj) continue chain.add(obj) t2 = bld.get_tgen_by_name(obj) r2 = indirect_libs(bld, t2, chain, loops) chain.remove(obj) ret = ret.union(t2.direct_libs) ret = ret.union(r2) t.indirect_libs = ret return ret def indirect_objects(bld, t, chain, loops): '''recursively calculate the indirect object dependencies for a target indirect objects are the set of objects from expanding the subsystem dependencies ''' ret = getattr(t, 'indirect_objects', None) if ret is not None: return ret ret = set() for lib in t.direct_objects: if lib in chain: dependency_loop(loops, t, lib) continue chain.add(lib) t2 = bld.get_tgen_by_name(lib) r2 = indirect_objects(bld, t2, chain, loops) chain.remove(lib) ret = ret.union(t2.direct_objects) ret = ret.union(r2) t.indirect_objects = ret return ret def extended_objects(bld, t, chain): '''recursively calculate the extended object dependencies for a target extended objects are the union of: - direct objects - indirect objects - direct and indirect objects of all direct and indirect libraries ''' ret = getattr(t, 'extended_objects', None) if ret is not None: return ret ret = set() ret = ret.union(t.final_objects) for lib in t.final_libs: if lib in chain: continue t2 = bld.get_tgen_by_name(lib) chain.add(lib) r2 = extended_objects(bld, t2, chain) chain.remove(lib) ret = ret.union(t2.final_objects) ret = ret.union(r2) t.extended_objects = ret return ret def includes_objects(bld, t, chain, inc_loops): '''recursively calculate the includes object dependencies for a target includes dependencies come from either library or object dependencies ''' ret = getattr(t, 'includes_objects', None) if ret is not None: return ret ret = t.direct_objects.copy() ret = ret.union(t.direct_libs) for obj in t.direct_objects: if obj in chain: dependency_loop(inc_loops, t, obj) continue chain.add(obj) t2 = bld.get_tgen_by_name(obj) r2 = includes_objects(bld, t2, chain, inc_loops) chain.remove(obj) ret = ret.union(t2.direct_objects) ret = ret.union(r2) for lib in t.direct_libs: if lib in chain: dependency_loop(inc_loops, t, lib) continue chain.add(lib) t2 = bld.get_tgen_by_name(lib) if t2 is None: targets = LOCAL_CACHE(bld, 'TARGET_TYPE') Logs.error('Target %s of type %s not found in direct_libs for %s' % ( lib, targets[lib], t.sname)) sys.exit(1) r2 = includes_objects(bld, t2, chain, inc_loops) chain.remove(lib) ret = ret.union(t2.direct_objects) ret = ret.union(r2) t.includes_objects = ret return ret def break_dependency_loops(bld, tgt_list): '''find and break dependency loops''' loops = {} inc_loops = {} # build up the list of loops for t in tgt_list: indirect_objects(bld, t, set(), loops) indirect_libs(bld, t, set(), loops) includes_objects(bld, t, set(), inc_loops) # break the loops for t in tgt_list: if t.sname in loops: for attr in ['direct_objects', 'indirect_objects', 'direct_libs', 'indirect_libs']: objs = getattr(t, attr, set()) setattr(t, attr, objs.difference(loops[t.sname])) for loop in loops: debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) for loop in inc_loops: debug('deps: Found include loops for target %s : %s', loop, inc_loops[loop]) # expand the loops mapping by one level for loop in loops.copy(): for tgt in loops[loop]: if tgt in loops: loops[loop] = loops[loop].union(loops[tgt]) for loop in inc_loops.copy(): for tgt in 
inc_loops[loop]: if tgt in inc_loops: inc_loops[loop] = inc_loops[loop].union(inc_loops[tgt]) # expand indirect subsystem and library loops for loop in loops.copy(): t = bld.get_tgen_by_name(loop) if t.samba_type in ['SUBSYSTEM']: loops[loop] = loops[loop].union(t.indirect_objects) loops[loop] = loops[loop].union(t.direct_objects) if t.samba_type in ['LIBRARY','PYTHON']: loops[loop] = loops[loop].union(t.indirect_libs) loops[loop] = loops[loop].union(t.direct_libs) if loop in loops[loop]: loops[loop].remove(loop) # expand indirect includes loops for loop in inc_loops.copy(): t = bld.get_tgen_by_name(loop) inc_loops[loop] = inc_loops[loop].union(t.includes_objects) if loop in inc_loops[loop]: inc_loops[loop].remove(loop) # add in the replacement dependencies for t in tgt_list: for loop in loops: for attr in ['indirect_objects', 'indirect_libs']: objs = getattr(t, attr, set()) if loop in objs: diff = loops[loop].difference(objs) if t.sname in diff: diff.remove(t.sname) if diff: debug('deps: Expanded target %s of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) objs = objs.union(diff) setattr(t, attr, objs) for loop in inc_loops: objs = getattr(t, 'includes_objects', set()) if loop in objs: diff = inc_loops[loop].difference(objs) if t.sname in diff: diff.remove(t.sname) if diff: debug('deps: Expanded target %s includes of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) objs = objs.union(diff) setattr(t, 'includes_objects', objs) def reduce_objects(bld, tgt_list): '''reduce objects by looking for indirect object dependencies''' rely_on = {} for t in tgt_list: t.extended_objects = None changed = False for type in ['BINARY', 'PYTHON', 'LIBRARY']: for t in tgt_list: if t.samba_type != type: continue # if we will indirectly link to a target then we don't need it new = t.final_objects.copy() for l in t.final_libs: t2 = bld.get_tgen_by_name(l) t2_obj = extended_objects(bld, t2, set()) dup = new.intersection(t2_obj) if t.sname in rely_on: dup = dup.difference(rely_on[t.sname]) if dup: # Do not remove duplicates of BUILTINS d = next(iter(dup)) if BUILTIN_LIBRARY(bld, d): continue debug('deps: removing dups from %s of type %s: %s also in %s %s', t.sname, t.samba_type, dup, t2.samba_type, l) new = new.difference(dup) changed = True if not l in rely_on: rely_on[l] = set() rely_on[l] = rely_on[l].union(dup) t.final_objects = new if not changed: return False # add back in any objects that were relied upon by the reduction rules for r in rely_on: t = bld.get_tgen_by_name(r) t.final_objects = t.final_objects.union(rely_on[r]) return True def show_library_loop(bld, lib1, lib2, path, seen): '''show the detailed path of a library loop between lib1 and lib2''' t = bld.get_tgen_by_name(lib1) if not lib2 in getattr(t, 'final_libs', set()): return for d in t.samba_deps_extended: if d in seen: continue seen.add(d) path2 = path + '=>' + d if d == lib2: Logs.warn('library loop path: ' + path2) return show_library_loop(bld, d, lib2, path2, seen) seen.remove(d) def calculate_final_deps(bld, tgt_list, loops): '''calculate the final library and object dependencies''' for t in tgt_list: # start with the maximum possible list t.final_libs = t.direct_libs.union(indirect_libs(bld, t, set(), loops)) t.final_objects = t.direct_objects.union(indirect_objects(bld, t, set(), loops)) for t in tgt_list: # don't depend on ourselves if t.sname in t.final_libs: t.final_libs.remove(t.sname) if t.sname in t.final_objects: t.final_objects.remove(t.sname) # handle any non-shared binaries for t in tgt_list: if 
t.samba_type == 'BINARY' and bld.NONSHARED_BINARY(t.sname): subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') # replace lib deps with objlist deps for l in t.final_libs: objname = l + '.objlist' t2 = bld.get_tgen_by_name(objname) if t2 is None: Logs.error('ERROR: subsystem %s not found' % objname) sys.exit(1) t.final_objects.add(objname) t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) if l in subsystem_list: # its a subsystem - we also need the contents of any modules for d in subsystem_list[l]: module_name = d['TARGET'] if targets[module_name] == 'LIBRARY': objname = module_name + '.objlist' elif targets[module_name] == 'SUBSYSTEM': objname = module_name else: continue t2 = bld.get_tgen_by_name(objname) if t2 is None: Logs.error('ERROR: subsystem %s not found' % objname) sys.exit(1) t.final_objects.add(objname) t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) t.final_libs = set() # find any library loops for t in tgt_list: if t.samba_type in ['LIBRARY', 'PYTHON']: for l in t.final_libs.copy(): t2 = bld.get_tgen_by_name(l) if t.sname in t2.final_libs: if getattr(bld.env, "ALLOW_CIRCULAR_LIB_DEPENDENCIES", False): # we could break this in either direction. If one of the libraries # has a version number, and will this be distributed publicly, then # we should make it the lower level library in the DAG Logs.warn('deps: removing library loop %s from %s' % (t.sname, t2.sname)) dependency_loop(loops, t, t2.sname) t2.final_libs.remove(t.sname) else: Logs.error('ERROR: circular library dependency between %s and %s' % (t.sname, t2.sname)) show_library_loop(bld, t.sname, t2.sname, t.sname, set()) show_library_loop(bld, t2.sname, t.sname, t2.sname, set()) sys.exit(1) for loop in loops: debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) # we now need to make corrections for any library loops we broke up # any target that depended on the target of the loop and doesn't # depend on the source of the loop needs to get the loop source added for type in ['BINARY','PYTHON','LIBRARY','BINARY']: for t in tgt_list: if t.samba_type != type: continue for loop in loops: if loop in t.final_libs: diff = loops[loop].difference(t.final_libs) if t.sname in diff: diff.remove(t.sname) if t.sname in diff: diff.remove(t.sname) # make sure we don't recreate the loop again! 
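                    # (descriptive note, not in the upstream comment: 'diff' is the
                    #  set of extra libraries this target would inherit from the
                    #  broken loop; any library in 'diff' whose own final_libs still
                    #  point back at t.sname is dropped below, so the circular
                    #  dependency that was just broken is not reintroduced.)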
for d in diff.copy(): t2 = bld.get_tgen_by_name(d) if t2.samba_type == 'LIBRARY': if t.sname in t2.final_libs: debug('deps: removing expansion %s from %s', d, t.sname) diff.remove(d) if diff: debug('deps: Expanded target %s by loop %s libraries (loop %s) %s', t.sname, loop, loops[loop], diff) t.final_libs = t.final_libs.union(diff) # remove objects that are also available in linked libs count = 0 while reduce_objects(bld, tgt_list): count += 1 if count > 100: Logs.warn("WARNING: Unable to remove all inter-target object duplicates") break debug('deps: Object reduction took %u iterations', count) # add in any syslib dependencies for t in tgt_list: if not t.samba_type in ['BINARY','PYTHON','LIBRARY','SUBSYSTEM']: continue syslibs = set() for d in t.final_objects: t2 = bld.get_tgen_by_name(d) syslibs = syslibs.union(t2.direct_syslibs) # this adds the indirect syslibs as well, which may not be needed # depending on the linker flags for d in t.final_libs: t2 = bld.get_tgen_by_name(d) syslibs = syslibs.union(t2.direct_syslibs) t.final_syslibs = syslibs # find any unresolved library loops lib_loop_error = False for t in tgt_list: if t.samba_type in ['LIBRARY', 'PYTHON']: for l in t.final_libs.copy(): t2 = bld.get_tgen_by_name(l) if t.sname in t2.final_libs: Logs.error('ERROR: Unresolved library loop %s from %s' % (t.sname, t2.sname)) lib_loop_error = True if lib_loop_error: sys.exit(1) debug('deps: removed duplicate dependencies') def show_dependencies(bld, target, seen): '''recursively show the dependencies of target''' if target in seen: return t = bld.get_tgen_by_name(target) if t is None: Logs.error("ERROR: Unable to find target '%s'" % target) sys.exit(1) Logs.info('%s(OBJECTS): %s' % (target, t.direct_objects)) Logs.info('%s(LIBS): %s' % (target, t.direct_libs)) Logs.info('%s(SYSLIBS): %s' % (target, t.direct_syslibs)) seen.add(target) for t2 in t.direct_objects: show_dependencies(bld, t2, seen) def show_object_duplicates(bld, tgt_list): '''show a list of object files that are included in more than one library or binary''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') used_by = {} Logs.info("showing duplicate objects") for t in tgt_list: if not targets[t.sname] in [ 'LIBRARY', 'PYTHON' ]: continue for n in getattr(t, 'final_objects', set()): t2 = bld.get_tgen_by_name(n) if not n in used_by: used_by[n] = set() used_by[n].add(t.sname) for n in used_by: if len(used_by[n]) > 1: Logs.info("target '%s' is used by %s" % (n, used_by[n])) Logs.info("showing indirect dependency counts (sorted by count)") def indirect_count(t1, t2): return len(t2.indirect_objects) - len(t1.indirect_objects) sorted_list = sorted(tgt_list, cmp=indirect_count) for t in sorted_list: if len(t.indirect_objects) > 1: Logs.info("%s depends on %u indirect objects" % (t.sname, len(t.indirect_objects))) ###################################################################### # this provides a way to save our dependency calculations between runs savedeps_version = 3 savedeps_inputs = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags', 'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols', 'use_global_deps', 'global_include' ] savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', 'cflags', 'ldflags', 'samba_deps_extended', 'final_libs'] savedeps_outenv = ['INC_PATHS'] savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ] savedeps_caches = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 
'SYSLIB_DEPS'] savedeps_files = ['buildtools/wafsamba/samba_deps.py'] def save_samba_deps(bld, tgt_list): '''save the dependency calculations between builds, to make further builds faster''' denv = ConfigSet.ConfigSet() denv.version = savedeps_version denv.savedeps_inputs = savedeps_inputs denv.savedeps_outputs = savedeps_outputs denv.input = {} denv.output = {} denv.outenv = {} denv.caches = {} denv.envvar = {} denv.files = {} for f in savedeps_files: denv.files[f] = os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime for c in savedeps_caches: denv.caches[c] = LOCAL_CACHE(bld, c) for e in savedeps_envvars: denv.envvar[e] = bld.env[e] for t in tgt_list: # save all the input attributes for each target tdeps = {} for attr in savedeps_inputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if tdeps != {}: denv.input[t.sname] = tdeps # save all the output attributes for each target tdeps = {} for attr in savedeps_outputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if tdeps != {}: denv.output[t.sname] = tdeps tdeps = {} for attr in savedeps_outenv: if attr in t.env: tdeps[attr] = t.env[attr] if tdeps != {}: denv.outenv[t.sname] = tdeps depsfile = os.path.join(bld.cache_dir, "sambadeps") denv.store_fast(depsfile) def load_samba_deps(bld, tgt_list): '''load a previous set of build dependencies if possible''' depsfile = os.path.join(bld.cache_dir, "sambadeps") denv = ConfigSet.ConfigSet() try: debug('deps: checking saved dependencies') denv.load_fast(depsfile) if (denv.version != savedeps_version or denv.savedeps_inputs != savedeps_inputs or denv.savedeps_outputs != savedeps_outputs): return False except Exception: return False # check if critical files have changed for f in savedeps_files: if f not in denv.files: return False if denv.files[f] != os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime: return False # check if caches are the same for c in savedeps_caches: if c not in denv.caches or denv.caches[c] != LOCAL_CACHE(bld, c): return False # check if caches are the same for e in savedeps_envvars: if e not in denv.envvar or denv.envvar[e] != bld.env[e]: return False # check inputs are the same for t in tgt_list: tdeps = {} for attr in savedeps_inputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if t.sname in denv.input: olddeps = denv.input[t.sname] else: olddeps = {} if tdeps != olddeps: #print '%s: \ntdeps=%s \nodeps=%s' % (t.sname, tdeps, olddeps) return False # put outputs in place for t in tgt_list: if not t.sname in denv.output: continue tdeps = denv.output[t.sname] for a in tdeps: setattr(t, a, tdeps[a]) # put output env vars in place for t in tgt_list: if not t.sname in denv.outenv: continue tdeps = denv.outenv[t.sname] for a in tdeps: t.env[a] = tdeps[a] debug('deps: loaded saved dependencies') return True def check_project_rules(bld): '''check the project rules - ensuring the targets are sane''' loops = {} inc_loops = {} tgt_list = get_tgt_list(bld) add_samba_attributes(bld, tgt_list) force_project_rules = (Options.options.SHOWDEPS or Options.options.SHOW_DUPLICATES) if not force_project_rules and load_samba_deps(bld, tgt_list): return timer = Utils.Timer() bld.new_rules = True Logs.info("Checking project rules ...") debug('deps: project rules checking started') expand_subsystem_deps(bld) debug("deps: expand_subsystem_deps: %s" % str(timer)) replace_grouping_libraries(bld, tgt_list) debug("deps: replace_grouping_libraries: %s" % str(timer)) build_direct_deps(bld, tgt_list) debug("deps: build_direct_deps: %s" % str(timer)) 
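    # (descriptive note: the remaining stages run in a fixed order:
    #  break_dependency_loops, an optional SHOWDEPS dump, calculate_final_deps,
    #  the per-target attribute generators, then the duplicate-source and
    #  group-ordering checks; the debug() timer strings are only visible when
    #  waf's debug/zone logging is enabled.)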
break_dependency_loops(bld, tgt_list) debug("deps: break_dependency_loops: %s" % str(timer)) if Options.options.SHOWDEPS: show_dependencies(bld, Options.options.SHOWDEPS, set()) calculate_final_deps(bld, tgt_list, loops) debug("deps: calculate_final_deps: %s" % str(timer)) if Options.options.SHOW_DUPLICATES: show_object_duplicates(bld, tgt_list) # run the various attribute generators for f in [ build_dependencies, build_includes, add_init_functions ]: debug('deps: project rules checking %s', f) for t in tgt_list: f(t) debug("deps: %s: %s" % (f, str(timer))) debug('deps: project rules stage1 completed') if not check_duplicate_sources(bld, tgt_list): Logs.error("Duplicate sources present - aborting") sys.exit(1) debug("deps: check_duplicate_sources: %s" % str(timer)) if not bld.check_group_ordering(tgt_list): Logs.error("Bad group ordering - aborting") sys.exit(1) debug("deps: check_group_ordering: %s" % str(timer)) show_final_deps(bld, tgt_list) debug("deps: show_final_deps: %s" % str(timer)) debug('deps: project rules checking completed - %u targets checked', len(tgt_list)) if not bld.is_install: save_samba_deps(bld, tgt_list) debug("deps: save_samba_deps: %s" % str(timer)) Logs.info("Project rules pass") def CHECK_PROJECT_RULES(bld): '''enable checking of project targets for sanity''' if bld.env.added_project_rules: return bld.env.added_project_rules = True bld.add_pre_fun(check_project_rules) Build.BuildContext.CHECK_PROJECT_RULES = CHECK_PROJECT_RULES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_dist.py0000660000000000000000000002122200000000000021453 0ustar00rootroot00000000000000# customised version of 'waf dist' for Samba tools # uses git ls-files to get file lists import os, sys, tarfile from waflib import Utils, Scripting, Logs, Options from waflib.Configure import conf from samba_utils import get_string from waflib import Context dist_dirs = None dist_files = None dist_blacklist = "" dist_archive = None class Dist(Context.Context): # TODO remove cmd = 'dist' fun = 'dist' def execute(self): Context.g_module.dist() class DistCheck(Scripting.DistCheck): fun = 'distcheck' cmd = 'distcheck' def execute(self): Options.options.distcheck_args = '' if Context.g_module.distcheck is Scripting.distcheck: # default Context.g_module.distcheck(self) else: Context.g_module.distcheck() Context.g_module.dist() self.check() def get_arch_name(self): global dist_archive return dist_archive def make_distcheck_cmd(self, tmpdir): waf = os.path.abspath(sys.argv[0]) return [sys.executable, waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] def add_symlink(tar, fname, abspath, basedir): '''handle symlinks to directories that may move during packaging''' if not os.path.islink(abspath): return False tinfo = tar.gettarinfo(name=abspath, arcname=fname) tgt = os.readlink(abspath) if dist_dirs: # we need to find the target relative to the main directory # this is here to cope with symlinks into the buildtools # directory from within the standalone libraries in Samba. 
For example, # a symlink to ../../builtools/scripts/autogen-waf.sh needs # to be rewritten as a symlink to buildtools/scripts/autogen-waf.sh # when the tarball for talloc is built # the filename without the appname-version rel_fname = '/'.join(fname.split('/')[1:]) # join this with the symlink target tgt_full = os.path.join(os.path.dirname(rel_fname), tgt) # join with the base directory tgt_base = os.path.normpath(os.path.join(basedir, tgt_full)) # see if this is inside one of our dist_dirs for dir in dist_dirs.split(): if dir.find(':') != -1: destdir=dir.split(':')[1] dir=dir.split(':')[0] else: destdir = '.' if dir == basedir: # internal links don't get rewritten continue if dir == tgt_base[0:len(dir)] and tgt_base[len(dir)] == '/': new_tgt = destdir + tgt_base[len(dir):] tinfo.linkname = new_tgt break tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' tar.addfile(tinfo) return True def add_tarfile(tar, fname, abspath, basedir): '''add a file to the tarball''' if add_symlink(tar, fname, abspath, basedir): return try: tinfo = tar.gettarinfo(name=abspath, arcname=fname) except OSError: Logs.error('Unable to find file %s - missing from git checkout?' % abspath) sys.exit(1) tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' fh = open(abspath, "rb") tar.addfile(tinfo, fileobj=fh) fh.close() def vcs_dir_contents(path): """Return the versioned files under a path. :return: List of paths relative to path """ repo = path while repo != "/": if os.path.isdir(os.path.join(repo, ".git")): ls_files_cmd = [ 'git', 'ls-files', '--full-name', os.path.relpath(path, repo) ] cwd = None env = dict(os.environ) env["GIT_DIR"] = os.path.join(repo, ".git") break repo = os.path.dirname(repo) if repo == "/": raise Exception("unsupported or no vcs for %s" % path) return get_string(Utils.cmd_output(ls_files_cmd, cwd=cwd, env=env)).split('\n') def dist(appname='', version=''): def add_files_to_tarball(tar, srcdir, srcsubdir, dstdir, dstsubdir, blacklist, files): if blacklist is None: blacklist = [] for f in files: abspath = os.path.join(srcdir, f) if srcsubdir != '.': f = f[len(srcsubdir)+1:] # Remove files in the blacklist if f in blacklist: continue blacklisted = False # Remove directories in the blacklist for d in blacklist: if f.startswith(d): blacklisted = True if blacklisted: continue if os.path.isdir(abspath) and not os.path.islink(abspath): continue if dstsubdir != '.': f = dstsubdir + '/' + f fname = dstdir + '/' + f add_tarfile(tar, fname, abspath, srcsubdir) def list_directory_files(path): curdir = os.getcwd() os.chdir(srcdir) out_files = [] for root, dirs, files in os.walk(path): for f in files: out_files.append(os.path.join(root, f)) os.chdir(curdir) return out_files if not isinstance(appname, str) or not appname: # this copes with a mismatch in the calling arguments for dist() appname = Context.g_module.APPNAME version = Context.g_module.VERSION if not version: version = Context.g_module.VERSION srcdir = os.path.normpath( os.path.join(os.path.dirname(Context.g_module.root_path), Context.g_module.top)) if not dist_dirs: Logs.error('You must use samba_dist.DIST_DIRS() to set which directories to package') sys.exit(1) dist_base = '%s-%s' % (appname, version) if Options.options.SIGN_RELEASE: dist_name = '%s.tar' % (dist_base) tar = tarfile.open(dist_name, 'w') else: dist_name = '%s.tar.gz' % (dist_base) tar = tarfile.open(dist_name, 'w:gz') blacklist = dist_blacklist.split() for dir in dist_dirs.split(): if dir.find(':') != -1: destdir=dir.split(':')[1] 
dir=dir.split(':')[0] else: destdir = '.' absdir = os.path.join(srcdir, dir) try: files = vcs_dir_contents(absdir) except Exception as e: Logs.error('unable to get contents of %s: %s' % (absdir, e)) sys.exit(1) add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) if dist_files: for file in dist_files.split(): if file.find(':') != -1: destfile = file.split(':')[1] file = file.split(':')[0] else: destfile = file absfile = os.path.join(srcdir, file) if os.path.isdir(absfile) and not os.path.islink(absfile): destdir = destfile dir = file files = list_directory_files(dir) add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) else: fname = dist_base + '/' + destfile add_tarfile(tar, fname, absfile, destfile) tar.close() if Options.options.SIGN_RELEASE: import gzip try: os.unlink(dist_name + '.asc') except OSError: pass cmd = "gpg --detach-sign --armor " + dist_name os.system(cmd) uncompressed_tar = open(dist_name, 'rb') compressed_tar = gzip.open(dist_name + '.gz', 'wb') while 1: buffer = uncompressed_tar.read(1048576) if buffer: compressed_tar.write(buffer) else: break uncompressed_tar.close() compressed_tar.close() os.unlink(dist_name) Logs.info('Created %s.gz %s.asc' % (dist_name, dist_name)) dist_name = dist_name + '.gz' else: Logs.info('Created %s' % dist_name) # TODO use the ctx object instead global dist_archive dist_archive = dist_name return dist_name @conf def DIST_DIRS(dirs): '''set the directories to package, relative to top srcdir''' global dist_dirs if not dist_dirs: dist_dirs = dirs @conf def DIST_FILES(files, extend=False): '''set additional files for packaging, relative to top srcdir''' global dist_files if not dist_files: dist_files = files elif extend: dist_files = dist_files + " " + files @conf def DIST_BLACKLIST(blacklist): '''set the files to exclude from packaging, relative to top srcdir''' global dist_blacklist if not dist_blacklist: dist_blacklist = blacklist Scripting.dist = dist ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/samba_git.py0000660000000000000000000000342000000000000021273 0ustar00rootroot00000000000000import os import subprocess def find_git(env=None): """Find the git binary.""" if env is not None and 'GIT' in env: return env.get_flat('GIT') # Get version from GIT if os.path.exists("/usr/bin/git"): # this is useful when doing make dist without configuring return "/usr/bin/git" return None def has_submodules(path): """Check whether a source directory is git-versioned and has submodules. :param path: Path to Samba source directory """ return (os.path.isdir(os.path.join(path, ".git")) and os.path.isfile(os.path.join(path, ".gitmodules"))) def read_submodule_status(path, env=None): """Check status of submodules. :param path: Path to git directory :param env: Optional waf environment :return: Yields tuples with submodule relpath and status (one of: 'out-of-date', 'not-checked-out', 'up-to-date') :raise RuntimeError: raised when parsing of 'git submodule status' output fails. """ if not has_submodules(path): # No point in running git. 
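        # (descriptive note: the generator yields nothing in this case; the same
        #  silent fall-through happens just below when no usable git binary is
        #  found, so callers simply see an empty submodule status list.)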
return git = find_git(env) if git is None: return p = subprocess.Popen([git, "submodule", "status"], stdout=subprocess.PIPE, cwd=path) (stdout, stderr) = p.communicate(None) for l in stdout.splitlines(): l = l.rstrip() status = l[0] l = l[1:] parts = l.split(" ") if len(parts) > 2 and status in ("-", "+"): yield (parts[1], "out-of-date") elif len(parts) == 2 and status == "-": yield (parts[1], "not-checked-out") elif len(parts) > 2 and status == " ": yield (parts[1], "up-to-date") else: raise RuntimeError("Unable to parse submodule status: %r, %r" % (status, parts)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_headers.py0000660000000000000000000001473100000000000022132 0ustar00rootroot00000000000000# specialist handling of header files for Samba import os, re, sys, fnmatch from waflib import Build, Logs, Utils, Errors from samba_utils import TO_LIST def header_install_path(header, header_path): '''find the installation path for a header, given a header_path option''' if not header_path: return '' if not isinstance(header_path, list): return header_path for (p1, dir) in header_path: for p2 in TO_LIST(p1): if fnmatch.fnmatch(header, p2): return dir # default to current path return '' re_header = re.compile('^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M) # a dictionary mapping source header paths to public header paths header_map = {} def find_suggested_header(hpath): '''find a suggested header path to use''' base = os.path.basename(hpath) ret = [] for h in header_map: if os.path.basename(h) == base: ret.append('<%s>' % header_map[h]) ret.append('"%s"' % h) return ret def create_public_header(task): '''create a public header from a private one, output within the build tree''' src = task.inputs[0].abspath(task.env) tgt = task.outputs[0].bldpath(task.env) if os.path.exists(tgt): os.unlink(tgt) relsrc = os.path.relpath(src, task.env.TOPDIR) infile = open(src, mode='r') outfile = open(tgt, mode='w') linenumber = 0 search_paths = [ '', task.env.RELPATH ] for i in task.env.EXTRA_INCLUDES: if i.startswith('#'): search_paths.append(i[1:]) for line in infile: linenumber += 1 # allow some straight substitutions if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace: outfile.write(task.env.public_headers_replace[line.strip()] + '\n') continue # see if its an include line m = re_header.match(line) if m is None: outfile.write(line) continue # its an include, get the header path hpath = m.group(1) if hpath.startswith("bin/default/"): hpath = hpath[12:] # some are always allowed if task.env.public_headers_skip and hpath in task.env.public_headers_skip: outfile.write(line) continue # work out the header this refers to found = False for s in search_paths: p = os.path.normpath(os.path.join(s, hpath)) if p in header_map: outfile.write("#include <%s>\n" % header_map[p]) found = True break if found: continue if task.env.public_headers_allow_broken: Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc)) outfile.write(line) continue # try to be nice to the developer by suggesting an alternative suggested = find_suggested_header(hpath) outfile.close() os.unlink(tgt) sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one of %s)\n" % ( os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested)) raise Errors.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % ( hpath, relsrc, task.env.RELPATH)) 
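    # (descriptive note: reaching this point means every '#include "..."' line in
    #  the private header was either rewritten to its <public> form via header_map,
    #  copied through because it is listed in public_headers_skip, or tolerated
    #  because public_headers_allow_broken is set; the munged header is left in the
    #  build tree and installed from there.)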
infile.close() outfile.close() def public_headers_simple(bld, public_headers, header_path=None, public_headers_install=True): '''install some headers - simple version, no munging needed ''' if not public_headers_install: return for h in TO_LIST(public_headers): inst_path = header_install_path(h, header_path) if h.find(':') != -1: s = h.split(":") h_name = s[0] inst_name = s[1] else: h_name = h inst_name = os.path.basename(h) bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name) def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True): '''install some headers header_path may either be a string that is added to the INCLUDEDIR, or it can be a dictionary of wildcard patterns which map to destination directories relative to INCLUDEDIR ''' bld.SET_BUILD_GROUP('final') if not bld.env.build_public_headers: # in this case no header munging neeeded. Used for tdb, talloc etc public_headers_simple(bld, public_headers, header_path=header_path, public_headers_install=public_headers_install) return # create the public header in the given path # in the build tree for h in TO_LIST(public_headers): inst_path = header_install_path(h, header_path) if h.find(':') != -1: s = h.split(":") h_name = s[0] inst_name = s[1] else: h_name = h inst_name = os.path.basename(h) curdir = bld.path.abspath() relpath1 = os.path.relpath(bld.srcnode.abspath(), curdir) relpath2 = os.path.relpath(curdir, bld.srcnode.abspath()) targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path)) if not os.path.exists(os.path.join(curdir, targetdir)): raise Errors.WafError("missing source directory %s for public header %s" % (targetdir, inst_name)) target = os.path.join(targetdir, inst_name) # the source path of the header, relative to the top of the source tree src_path = os.path.normpath(os.path.join(relpath2, h_name)) # the install path of the header, relative to the public include directory target_path = os.path.normpath(os.path.join(inst_path, inst_name)) header_map[src_path] = target_path t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name), group='headers', rule=create_public_header, source=h_name, target=target) t.env.RELPATH = relpath2 t.env.TOPDIR = bld.srcnode.abspath() if not bld.env.public_headers_list: bld.env.public_headers_list = [] bld.env.public_headers_list.append(os.path.join(inst_path, inst_name)) if public_headers_install: bld.INSTALL_FILES('${INCLUDEDIR}', target, destname=os.path.join(inst_path, inst_name), flat=True) Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_install.py0000660000000000000000000002037500000000000022166 0ustar00rootroot00000000000000########################### # this handles the magic we need to do for installing # with all the configure options that affect rpath and shared # library use import os from waflib import Utils, Errors from waflib.TaskGen import feature, before, after from samba_utils import LIB_PATH, MODE_755, install_rpath, build_rpath @feature('install_bin') @after('apply_core') @before('apply_link', 'apply_obj_vars') def install_binary(self): '''install a binary, taking account of the different rpath variants''' bld = self.bld # get the ldflags we will use for install and build install_ldflags = install_rpath(self) build_ldflags = build_rpath(bld) if not self.bld.is_install: # just need to set rpath if we are not installing 
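        # (descriptive note: build_rpath() is expected to point at the in-tree
        #  shared library directory so freshly built binaries run without being
        #  installed, while install_rpath() reflects the configured install-time
        #  rpath; when the two differ, a separate '.inst' link target is created
        #  further down so the uninstalled binary keeps its build rpath.)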
self.env.RPATH = build_ldflags return # work out the install path, expanding variables install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}' install_path = bld.EXPAND_VARIABLES(install_path) orig_target = os.path.basename(self.target) if install_ldflags != build_ldflags: # we will be creating a new target name, and using that for the # install link. That stops us from overwriting the existing build # target, which has different ldflags self.target += '.inst' # setup the right rpath link flags for the install self.env.RPATH = install_ldflags if not self.samba_install: # this binary is marked not to be installed return # tell waf to install the right binary bld.install_as(os.path.join(install_path, orig_target), self.path.find_or_declare(self.target), chmod=MODE_755) @feature('install_lib') @after('apply_core') @before('apply_link', 'apply_obj_vars') def install_library(self): '''install a library, taking account of the different rpath variants''' if getattr(self, 'done_install_library', False): return bld = self.bld default_env = bld.all_envs['default'] try: install_ldflags = install_rpath(self) build_ldflags = build_rpath(bld) if not self.bld.is_install or not getattr(self, 'samba_install', True): # just need to set the build rpath if we are not installing self.env.RPATH = build_ldflags return # setup the install path, expanding variables install_path = getattr(self, 'samba_inst_path', None) if install_path is None: if getattr(self, 'private_library', False): install_path = '${PRIVATELIBDIR}' else: install_path = '${LIBDIR}' install_path = bld.EXPAND_VARIABLES(install_path) target_name = self.target if install_ldflags != build_ldflags: # we will be creating a new target name, and using that for the # install link. That stops us from overwriting the existing build # target, which has different ldflags self.done_install_library = True t = self.clone(self.env) t.posted = False t.target += '.inst' t.name = self.name + '.inst' self.env.RPATH = build_ldflags else: t = self t.env.RPATH = install_ldflags dev_link = None # in the following the names are: # - inst_name is the name with .inst. 
in it, in the build # directory # - install_name is the name in the install directory # - install_link is a symlink in the install directory, to install_name if getattr(self, 'samba_realname', None): install_name = self.samba_realname install_link = None if getattr(self, 'soname', ''): install_link = self.soname if getattr(self, 'samba_type', None) == 'PYTHON': inst_name = bld.make_libname(t.target, nolibprefix=True, python=True) else: inst_name = bld.make_libname(t.target) elif self.vnum: vnum_base = self.vnum.split('.')[0] install_name = bld.make_libname(target_name, version=self.vnum) install_link = bld.make_libname(target_name, version=vnum_base) inst_name = bld.make_libname(t.target) if not self.private_library or not t.env.SONAME_ST: # only generate the dev link for non-bundled libs dev_link = bld.make_libname(target_name) elif getattr(self, 'soname', ''): install_name = bld.make_libname(target_name) install_link = self.soname inst_name = bld.make_libname(t.target) else: install_name = bld.make_libname(target_name) install_link = None inst_name = bld.make_libname(t.target) if t.env.SONAME_ST: # ensure we get the right names in the library if install_link: t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link) else: t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name) t.env.SONAME_ST = '' # tell waf to install the library bld.install_as(os.path.join(install_path, install_name), self.path.find_or_declare(inst_name), chmod=MODE_755) if install_link and install_link != install_name: # and the symlink if needed bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name)) if dev_link: bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name)) finally: bld.all_envs['default'] = default_env @feature('cshlib') @after('apply_implib') @before('apply_vnum') def apply_soname(self): '''install a library, taking account of the different rpath variants''' if self.env.SONAME_ST and getattr(self, 'soname', ''): self.env.append_value('LINKFLAGS', self.env.SONAME_ST % self.soname) self.env.SONAME_ST = '' @feature('cshlib') @after('apply_implib') @before('apply_vnum') def apply_vscript(self): '''add version-script arguments to library build''' if self.env.HAVE_LD_VERSION_SCRIPT and getattr(self, 'version_script', ''): self.env.append_value('LINKFLAGS', "-Wl,--version-script=%s" % self.version_script) self.version_script = None ############################## # handle the creation of links for libraries and binaries in the build tree @feature('symlink_lib') @after('apply_link') def symlink_lib(self): '''symlink a shared lib''' if self.target.endswith('.inst'): return blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env)) libpath = self.link_task.outputs[0].abspath(self.env) # calculat the link target and put it in the environment soext="" vnum = getattr(self, 'vnum', None) if vnum is not None: soext = '.' 
+ vnum.split('.')[0] link_target = getattr(self, 'link_name', '') if link_target == '': basename = os.path.basename(self.bld.make_libname(self.target, version=soext)) if getattr(self, "private_library", False): link_target = '%s/private/%s' % (LIB_PATH, basename) else: link_target = '%s/%s' % (LIB_PATH, basename) link_target = os.path.join(blddir, link_target) if os.path.lexists(link_target): if os.path.islink(link_target) and os.readlink(link_target) == libpath: return os.unlink(link_target) link_container = os.path.dirname(link_target) if not os.path.isdir(link_container): os.makedirs(link_container) os.symlink(libpath, link_target) @feature('symlink_bin') @after('apply_link') def symlink_bin(self): '''symlink a binary into the build directory''' if self.target.endswith('.inst'): return if not self.link_task.outputs or not self.link_task.outputs[0]: raise Errors.WafError('no outputs found for %s in symlink_bin' % self.name) binpath = self.link_task.outputs[0].abspath(self.env) bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name) if os.path.lexists(bldpath): if os.path.islink(bldpath) and os.readlink(bldpath) == binpath: return os.unlink(bldpath) os.symlink(binpath, bldpath) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4137106 tevent-0.11.0/buildtools/wafsamba/samba_patterns.py0000660000000000000000000002342600000000000022360 0ustar00rootroot00000000000000# a waf tool to add extension based build patterns for Samba import sys from waflib import Build from wafsamba import samba_version_file def write_version_header(task): '''print version.h contents''' src = task.inputs[0].srcpath(task.env) version = samba_version_file(src, task.env.srcdir, env=task.env, is_install=task.generator.bld.is_install) string = str(version) task.outputs[0].write(string) return 0 def SAMBA_MKVERSION(bld, target, source='VERSION'): '''generate the version.h header for Samba''' # We only force waf to re-generate this file if we are installing, # because only then is information not included in the deps (the # git revision) included in the version. t = bld.SAMBA_GENERATOR('VERSION', rule=write_version_header, group='setup', source=source, target=target, always=bld.is_install) Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION def write_build_options_header(fp): '''write preamble for build_options.c''' fp.write("/*\n") fp.write(" Unix SMB/CIFS implementation.\n") fp.write(" Build Options for Samba Suite\n") fp.write(" Copyright (C) Vance Lankhaar 2003\n") fp.write(" Copyright (C) Andrew Bartlett 2001\n") fp.write("\n") fp.write(" This program is free software; you can redistribute it and/or modify\n") fp.write(" it under the terms of the GNU General Public License as published by\n") fp.write(" the Free Software Foundation; either version 3 of the License, or\n") fp.write(" (at your option) any later version.\n") fp.write("\n") fp.write(" This program is distributed in the hope that it will be useful,\n") fp.write(" but WITHOUT ANY WARRANTY; without even the implied warranty of\n") fp.write(" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n") fp.write(" GNU General Public License for more details.\n") fp.write("\n") fp.write(" You should have received a copy of the GNU General Public License\n") fp.write(" along with this program; if not, see .\n") fp.write("*/\n") fp.write("\n") fp.write("#include \"includes.h\"\n") fp.write("#include \"dynconfig/dynconfig.h\"\n") fp.write("#include \"lib/cluster_support.h\"\n") fp.write("\n") fp.write("static int output(bool screen, const char *format, ...) PRINTF_ATTRIBUTE(2,3);\n") fp.write("void build_options(bool screen);\n") fp.write("\n") fp.write("\n") fp.write("/****************************************************************************\n") fp.write("helper function for build_options\n") fp.write("****************************************************************************/\n") fp.write("static int output(bool screen, const char *format, ...)\n") fp.write("{\n") fp.write(" char *ptr = NULL;\n") fp.write(" int ret = 0;\n") fp.write(" va_list ap;\n") fp.write(" \n") fp.write(" va_start(ap, format);\n") fp.write(" ret = vasprintf(&ptr,format,ap);\n") fp.write(" va_end(ap);\n") fp.write("\n") fp.write(" if (screen) {\n") fp.write(" d_printf(\"%s\", ptr ? ptr : \"\");\n") fp.write(" } else {\n") fp.write(" DEBUG(4,(\"%s\", ptr ? ptr : \"\"));\n") fp.write(" }\n") fp.write(" \n") fp.write(" SAFE_FREE(ptr);\n") fp.write(" return ret;\n") fp.write("}\n") fp.write("\n") fp.write("/****************************************************************************\n") fp.write("options set at build time for the samba suite\n") fp.write("****************************************************************************/\n") fp.write("void build_options(bool screen)\n") fp.write("{\n") fp.write(" if ((DEBUGLEVEL < 4) && (!screen)) {\n") fp.write(" return;\n") fp.write(" }\n") fp.write("\n") fp.write("\n") fp.write(" /* Output various paths to files and directories */\n") fp.write(" output(screen,\"\\nPaths:\\n\");\n") fp.write(" output(screen,\" SBINDIR: %s\\n\", get_dyn_SBINDIR());\n") fp.write(" output(screen,\" BINDIR: %s\\n\", get_dyn_BINDIR());\n") fp.write(" output(screen,\" CONFIGFILE: %s\\n\", get_dyn_CONFIGFILE());\n") fp.write(" output(screen,\" LOGFILEBASE: %s\\n\", get_dyn_LOGFILEBASE());\n") fp.write(" output(screen,\" LMHOSTSFILE: %s\\n\",get_dyn_LMHOSTSFILE());\n") fp.write(" output(screen,\" LIBDIR: %s\\n\",get_dyn_LIBDIR());\n") fp.write(" output(screen,\" DATADIR: %s\\n\",get_dyn_DATADIR());\n") fp.write(" output(screen,\" SAMBA_DATADIR: %s\\n\",get_dyn_SAMBA_DATADIR());\n") fp.write(" output(screen,\" MODULESDIR: %s\\n\",get_dyn_MODULESDIR());\n") fp.write(" output(screen,\" SHLIBEXT: %s\\n\",get_dyn_SHLIBEXT());\n") fp.write(" output(screen,\" LOCKDIR: %s\\n\",get_dyn_LOCKDIR());\n") fp.write(" output(screen,\" STATEDIR: %s\\n\",get_dyn_STATEDIR());\n") fp.write(" output(screen,\" CACHEDIR: %s\\n\",get_dyn_CACHEDIR());\n") fp.write(" output(screen,\" PIDDIR: %s\\n\", get_dyn_PIDDIR());\n") fp.write(" output(screen,\" SMB_PASSWD_FILE: %s\\n\",get_dyn_SMB_PASSWD_FILE());\n") fp.write(" output(screen,\" PRIVATE_DIR: %s\\n\",get_dyn_PRIVATE_DIR());\n") fp.write(" output(screen,\" BINDDNS_DIR: %s\\n\",get_dyn_BINDDNS_DIR());\n") fp.write("\n") def write_build_options_footer(fp): fp.write(" /* Output the sizes of the various cluster features */\n") fp.write(" output(screen, \"\\n%s\", cluster_support_features());\n") fp.write("\n") fp.write(" /* Output the sizes of the various types */\n") fp.write(" output(screen, \"\\nType sizes:\\n\");\n") fp.write(" output(screen, \" 
sizeof(char): %lu\\n\",(unsigned long)sizeof(char));\n") fp.write(" output(screen, \" sizeof(int): %lu\\n\",(unsigned long)sizeof(int));\n") fp.write(" output(screen, \" sizeof(long): %lu\\n\",(unsigned long)sizeof(long));\n") fp.write(" output(screen, \" sizeof(long long): %lu\\n\",(unsigned long)sizeof(long long));\n") fp.write(" output(screen, \" sizeof(uint8_t): %lu\\n\",(unsigned long)sizeof(uint8_t));\n") fp.write(" output(screen, \" sizeof(uint16_t): %lu\\n\",(unsigned long)sizeof(uint16_t));\n") fp.write(" output(screen, \" sizeof(uint32_t): %lu\\n\",(unsigned long)sizeof(uint32_t));\n") fp.write(" output(screen, \" sizeof(short): %lu\\n\",(unsigned long)sizeof(short));\n") fp.write(" output(screen, \" sizeof(void*): %lu\\n\",(unsigned long)sizeof(void*));\n") fp.write(" output(screen, \" sizeof(size_t): %lu\\n\",(unsigned long)sizeof(size_t));\n") fp.write(" output(screen, \" sizeof(off_t): %lu\\n\",(unsigned long)sizeof(off_t));\n") fp.write(" output(screen, \" sizeof(ino_t): %lu\\n\",(unsigned long)sizeof(ino_t));\n") fp.write(" output(screen, \" sizeof(dev_t): %lu\\n\",(unsigned long)sizeof(dev_t));\n") fp.write("\n") fp.write(" output(screen, \"\\nBuiltin modules:\\n\");\n") fp.write(" output(screen, \" %s\\n\", STRING_STATIC_MODULES);\n") fp.write("}\n") def write_build_options_section(fp, keys, section): fp.write("\n\t/* Show %s */\n" % section) fp.write(" output(screen, \"\\n%s:\\n\");\n\n" % section) for k in sorted(keys): fp.write("#ifdef %s\n" % k) fp.write(" output(screen, \" %s\\n\");\n" % k) fp.write("#endif\n") fp.write("\n") def write_build_options(task): tbl = task.env keys_option_with = [] keys_option_utmp = [] keys_option_have = [] keys_header_sys = [] keys_header_other = [] keys_misc = [] if sys.hexversion>0x300000f: trans_table = bytes.maketrans(b'.-()', b'____') else: import string trans_table = string.maketrans('.-()', '____') for key in tbl: if key.startswith("HAVE_UT_UT_") or key.find("UTMP") >= 0: keys_option_utmp.append(key) elif key.startswith("WITH_"): keys_option_with.append(key) elif key.startswith("HAVE_SYS_"): keys_header_sys.append(key) elif key.startswith("HAVE_"): if key.endswith("_H"): keys_header_other.append(key) else: keys_option_have.append(key) elif key.startswith("static_init_"): l = key.split("(") keys_misc.append(l[0]) else: keys_misc.append(key.translate(trans_table)) tgt = task.outputs[0].bldpath(task.env) f = open(tgt, 'w') write_build_options_header(f) write_build_options_section(f, keys_header_sys, "System Headers") write_build_options_section(f, keys_header_other, "Headers") write_build_options_section(f, keys_option_utmp, "UTMP Options") write_build_options_section(f, keys_option_have, "HAVE_* Defines") write_build_options_section(f, keys_option_with, "--with Options") write_build_options_section(f, keys_misc, "Build Options") write_build_options_footer(f) f.close() return 0 def SAMBA_BLDOPTIONS(bld, target): '''generate the bld_options.c for Samba''' t = bld.SAMBA_GENERATOR(target, rule=write_build_options, dep_vars=['defines'], target=target) Build.BuildContext.SAMBA_BLDOPTIONS = SAMBA_BLDOPTIONS ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.853205 tevent-0.11.0/buildtools/wafsamba/samba_perl.py0000660000000000000000000000410300000000000021451 0ustar00rootroot00000000000000from waflib import Utils from waflib.Configure import conf from samba_utils import get_string done = {} @conf def SAMBA_CHECK_PERL(conf, mandatory=True, version=(5,0,0)): if "done" in done: return 
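    # (descriptive note: the module-level 'done' dict makes this configure check
    #  run at most once per process; the code below probes perl's %Config for the
    #  vendor prefix and only reuses vendorarch/vendorlib when that prefix matches
    #  the configured PREFIX, otherwise it falls back to ${LIBDIR}/perl5 and
    #  ${DATADIR}/perl5.)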
done["done"] = True conf.find_program('perl', var='PERL', mandatory=mandatory) conf.load('perl') path_perl = conf.find_program('perl') conf.env.PERL_SPECIFIED = (conf.env.PERL != path_perl) conf.check_perl_version(version) def read_perl_config_var(cmd): output = Utils.cmd_output([conf.env.get_flat('PERL'), '-MConfig', '-e', cmd]) if not isinstance(output, str): output = get_string(output) return Utils.to_list(output) def check_perl_config_var(var): conf.start_msg("Checking for perl $Config{%s}:" % var) try: v = read_perl_config_var('print $Config{%s}' % var)[0] conf.end_msg("'%s'" % (v), 'GREEN') return v except IndexError: conf.end_msg(False, 'YELLOW') pass return None vendor_prefix = check_perl_config_var('vendorprefix') perl_arch_install_dir = None if vendor_prefix == conf.env.PREFIX: perl_arch_install_dir = check_perl_config_var('vendorarch'); if perl_arch_install_dir is None: perl_arch_install_dir = "${LIBDIR}/perl5"; conf.start_msg("PERL_ARCH_INSTALL_DIR: ") conf.end_msg("'%s'" % (perl_arch_install_dir), 'GREEN') conf.env.PERL_ARCH_INSTALL_DIR = perl_arch_install_dir perl_lib_install_dir = None if vendor_prefix == conf.env.PREFIX: perl_lib_install_dir = check_perl_config_var('vendorlib'); if perl_lib_install_dir is None: perl_lib_install_dir = "${DATADIR}/perl5"; conf.start_msg("PERL_LIB_INSTALL_DIR: ") conf.end_msg("'%s'" % (perl_lib_install_dir), 'GREEN') conf.env.PERL_LIB_INSTALL_DIR = perl_lib_install_dir perl_inc = read_perl_config_var('print "@INC"') if '.' in perl_inc: perl_inc.remove('.') conf.start_msg("PERL_INC: ") conf.end_msg("%s" % (perl_inc), 'GREEN') conf.env.PERL_INC = perl_inc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_pidl.py0000660000000000000000000001525400000000000021450 0ustar00rootroot00000000000000# waf build tool for building IDL files with pidl import os from waflib import Build, Utils from waflib.TaskGen import feature, before from samba_utils import SET_TARGET_TYPE, TO_LIST, LOCAL_CACHE def SAMBA_PIDL(bld, pname, source, options='', output_dir='.', generate_tables=True): '''Build a IDL file using pidl. This will produce up to 13 output files depending on the options used''' bname = source[0:-4]; # strip off the .idl suffix bname = os.path.basename(bname) name = "%s_%s" % (pname, bname.upper()) if not SET_TARGET_TYPE(bld, name, 'PIDL'): return bld.SET_BUILD_GROUP('build_source') # the output files depend on the options used. 
Use this dictionary # to map between the options and the resulting file names options_map = { '--header' : '%s.h', '--ndr-parser' : 'ndr_%s.c ndr_%s.h', '--samba3-ndr-server' : 'srv_%s.c srv_%s.h', '--samba3-ndr-client' : 'cli_%s.c cli_%s.h', '--server' : 'ndr_%s_s.c', '--server-compat' : 'ndr_%s_scompat.c ndr_%s_scompat.h', '--client' : 'ndr_%s_c.c ndr_%s_c.h', '--python' : 'py_%s.c', '--tdr-parser' : 'tdr_%s.c tdr_%s.h', '--dcom-proxy' : '%s_p.c', '--com-header' : 'com_%s.h' } table_header_idx = None out_files = [] options_list = TO_LIST(options) for o in options_list: if o in options_map: ofiles = TO_LIST(options_map[o]) for f in ofiles: out_files.append(os.path.join(output_dir, f % bname)) if f == 'ndr_%s.h': # remember this one for the tables generation table_header_idx = len(out_files) - 1 # depend on the full pidl sources source = TO_LIST(source) try: pidl_src_nodes = bld.pidl_files_cache except AttributeError: bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False) bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False)) pidl_src_nodes = bld.pidl_files_cache # the cd .. is needed because pidl currently is sensitive to the directory it is run in cpp = "" cc = "" if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "": if isinstance(bld.CONFIG_GET("CPP"), list): cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP")) else: cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP") if cpp == "CPP=xlc_r": cpp = "" if bld.env['PIDL_DEVELOPER_MODE']: pidl_dev = 'PIDL_DEVELOPER=1 ' else: pidl_dev = '' if bld.CONFIG_SET("CC"): if isinstance(bld.CONFIG_GET("CC"), list): cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC")) else: cc = 'CC="%s"' % bld.CONFIG_GET("CC") t = bld(rule='cd ${PIDL_LAUNCH_DIR} && %s%s %s ${PERL} ${PIDL} --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${IDLSRC}"' % (pidl_dev, cpp, cc), ext_out = '.c', before = 'c', update_outputs = True, shell = True, source = source, target = out_files, name = name, samba_type = 'PIDL') t.env.PIDL_LAUNCH_DIR = bld.srcnode.path_from(bld.bldnode) pnode = bld.srcnode.find_resource('pidl/pidl') t.env.PIDL = pnode.path_from(bld.srcnode) t.env.OPTIONS = TO_LIST(options) snode = t.path.find_resource(source[0]) t.env.IDLSRC = snode.path_from(bld.srcnode) t.env.OUTPUTDIR = bld.bldnode.path_from(bld.srcnode) + '/' + bld.path.find_dir(output_dir).path_from(bld.srcnode) bld.add_manual_dependency(snode, pidl_src_nodes) if generate_tables and table_header_idx is not None: pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS') pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])] t.more_includes = '#' + bld.path.path_from(bld.srcnode) Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL def SAMBA_PIDL_LIST(bld, name, source, options='', output_dir='.', generate_tables=True, generate_fuzzers=True): '''A wrapper for building a set of IDL files''' for p in TO_LIST(source): bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables) # Some IDL files don't exactly match between name and # "interface" so we need a way to skip those, while other IDL # files have the table generation skipped entirely, on which # the fuzzers rely if generate_tables and generate_fuzzers: interface = p[0:-4] # strip off the .idl suffix bld.SAMBA_NDR_FUZZ(interface, auto_deps=True, fuzz_type="TYPE_STRUCT") # Only generate the TYPE_STRUCT fuzzer if this isn't # really DCE/RPC if '--client' in options: bld.SAMBA_NDR_FUZZ(interface, auto_deps=True, fuzz_type="TYPE_IN") bld.SAMBA_NDR_FUZZ(interface, auto_deps=True, 
fuzz_type="TYPE_OUT") Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST ################################################################# # the rule for generating the NDR tables @feature('collect') @before('exec_rule') def collect(self): pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS') # The first source is tables.pl itself self.source = Utils.to_list(self.source) for (name, hd) in pidl_headers.items(): y = self.bld.get_tgen_by_name(name) self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name) y.post() for node in hd: self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name) self.source.append(node) def SAMBA_PIDL_TABLES(bld, name, target): '''generate the pidl NDR tables file''' bld.SET_BUILD_GROUP('main') t = bld( features = 'collect', rule = '${PERL} ${SRC} > ${TGT}', ext_out = '.c', before = 'c', update_outputs = True, shell = True, source = '../../librpc/tables.pl', target = target, name = name) t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc') Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.4137106 tevent-0.11.0/buildtools/wafsamba/samba_python.py0000660000000000000000000001210700000000000022033 0ustar00rootroot00000000000000# waf build tool for building IDL files with pidl import os, sys from waflib import Build, Logs, Utils, Configure, Errors from waflib.Configure import conf @conf def SAMBA_CHECK_PYTHON(conf, version=(3,6,0)): if conf.env.enable_fuzzing: version=(3,5,0) # enable tool to build python extensions if conf.env.HAVE_PYTHON_H: conf.check_python_version(version) return interpreters = [] conf.find_program('python3', var='PYTHON', mandatory=not conf.env.disable_python) conf.load('python') path_python = conf.find_program('python3') conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python) conf.check_python_version(version) interpreters.append(conf.env['PYTHON']) conf.env.python_interpreters = interpreters @conf def SAMBA_CHECK_PYTHON_HEADERS(conf): if conf.env.disable_python: conf.msg("python headers", "Check disabled due to --disable-python") # we don't want PYTHONDIR in config.h, as otherwise changing # --prefix causes a complete rebuild conf.env.DEFINES = [x for x in conf.env.DEFINES if not x.startswith('PYTHONDIR=') and not x.startswith('PYTHONARCHDIR=')] return if conf.env["python_headers_checked"] == []: _check_python_headers(conf) conf.env["python_headers_checked"] = "yes" else: conf.msg("python headers", "using cache") # we don't want PYTHONDIR in config.h, as otherwise changing # --prefix causes a complete rebuild conf.env.DEFINES = [x for x in conf.env.DEFINES if not x.startswith('PYTHONDIR=') and not x.startswith('PYTHONARCHDIR=')] def _check_python_headers(conf): conf.check_python_headers() abi_pattern = os.path.splitext(conf.env['pyext_PATTERN'])[0] conf.env['PYTHON_SO_ABI_FLAG'] = abi_pattern % '' conf.env['PYTHON_LIBNAME_SO_ABI_FLAG'] = ( conf.env['PYTHON_SO_ABI_FLAG'].replace('_', '-')) for lib in conf.env['LINKFLAGS_PYEMBED']: if lib.startswith('-L'): conf.env.append_unique('LIBPATH_PYEMBED', lib[2:]) # strip '-L' conf.env['LINKFLAGS_PYEMBED'].remove(lib) # same as in waf 1.5, keep only '-fno-strict-aliasing' # and ignore defines such as NDEBUG _FORTIFY_SOURCE=2 conf.env.DEFINES_PYEXT = [] conf.env.CFLAGS_PYEXT = ['-fno-strict-aliasing'] return def PYTHON_BUILD_IS_ENABLED(self): return self.CONFIG_SET('HAVE_PYTHON_H') Build.BuildContext.PYTHON_BUILD_IS_ENABLED = 
PYTHON_BUILD_IS_ENABLED def SAMBA_PYTHON(bld, name, source='', deps='', public_deps='', realname=None, cflags='', cflags_end=None, includes='', init_function_sentinel=None, local_include=True, vars=None, install=True, enabled=True): '''build a python extension for Samba''' # force-disable when we can't build python modules, so # every single call doesn't need to pass this in. if not bld.PYTHON_BUILD_IS_ENABLED(): enabled = False # Save time, no need to build python bindings when fuzzing if bld.env.enable_fuzzing: enabled = False # when we support static python modules we'll need to gather # the list from all the SAMBA_PYTHON() targets if init_function_sentinel is not None: cflags += ' -DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel # From https://docs.python.org/2/c-api/arg.html: # Starting with Python 2.5 the type of the length argument to # PyArg_ParseTuple(), PyArg_ParseTupleAndKeywords() and PyArg_Parse() # can be controlled by defining the macro PY_SSIZE_T_CLEAN before # including Python.h. If the macro is defined, length is a Py_ssize_t # rather than an int. # Because if often included before includes.h/config.h # This must be in the -D compiler options cflags += ' -DPY_SSIZE_T_CLEAN=1' source = bld.EXPAND_VARIABLES(source, vars=vars) if realname is not None: link_name = 'python/%s' % realname else: link_name = None bld.SAMBA_LIBRARY(name, source=source, deps=deps, public_deps=public_deps, includes=includes, cflags=cflags, cflags_end=cflags_end, local_include=local_include, vars=vars, realname=realname, link_name=link_name, pyext=True, target_type='PYTHON', install_path='${PYTHONARCHDIR}', allow_undefined_symbols=True, install=install, enabled=enabled) Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON def pyembed_libname(bld, name): if bld.env['PYTHON_SO_ABI_FLAG']: return name + bld.env['PYTHON_SO_ABI_FLAG'] else: return name Build.BuildContext.pyembed_libname = pyembed_libname ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/samba_third_party.py0000660000000000000000000000273700000000000023053 0ustar00rootroot00000000000000# functions to support third party libraries import os from waflib import Utils, Build, Context from waflib.Configure import conf @conf def CHECK_FOR_THIRD_PARTY(conf): return os.path.exists(os.path.join(Context.g_module.top, 'third_party')) Build.BuildContext.CHECK_FOR_THIRD_PARTY = CHECK_FOR_THIRD_PARTY @conf def CHECK_POPT(conf): return conf.CHECK_BUNDLED_SYSTEM('popt', checkfunctions='poptGetContext', headers='popt.h') Build.BuildContext.CHECK_POPT = CHECK_POPT @conf def CHECK_CMOCKA(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('cmocka', minversion='1.1.3') Build.BuildContext.CHECK_CMOCKA = CHECK_CMOCKA @conf def CHECK_SOCKET_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('socket_wrapper', minversion='1.3.3') Build.BuildContext.CHECK_SOCKET_WRAPPER = CHECK_SOCKET_WRAPPER @conf def CHECK_NSS_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('nss_wrapper', minversion='1.1.11') Build.BuildContext.CHECK_NSS_WRAPPER = CHECK_NSS_WRAPPER @conf def CHECK_RESOLV_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('resolv_wrapper', minversion='1.1.7') Build.BuildContext.CHECK_RESOLV_WRAPPER = CHECK_RESOLV_WRAPPER @conf def CHECK_UID_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('uid_wrapper', minversion='1.2.7') Build.BuildContext.CHECK_UID_WRAPPER = CHECK_UID_WRAPPER @conf def CHECK_PAM_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('pam_wrapper', 
minversion='1.1.2') Build.BuildContext.CHECK_PAM_WRAPPER = CHECK_PAM_WRAPPER ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/samba_utils.py0000660000000000000000000006011600000000000021655 0ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import errno import os, sys, re, fnmatch, shlex, inspect from optparse import SUPPRESS_HELP from waflib import Build, Options, Utils, Task, Logs, Configure, Errors, Context from waflib import Scripting from waflib.TaskGen import feature, before, after from waflib.Configure import ConfigurationContext from waflib.Logs import debug from waflib import ConfigSet from waflib.Build import CACHE_SUFFIX # TODO: make this a --option LIB_PATH="shared" PY3 = sys.version_info[0] == 3 if PY3: # helper function to get a string from a variable that maybe 'str' or # 'bytes' if 'bytes' then it is decoded using 'utf8'. If 'str' is passed # it is returned unchanged # Using this function is PY2/PY3 code should ensure in most cases # the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly # decodes the variable (see PY2 implementation of this function below) def get_string(bytesorstring): tmp = bytesorstring if isinstance(bytesorstring, bytes): tmp = bytesorstring.decode('utf8') elif not isinstance(bytesorstring, str): raise ValueError('Expected byte of string for %s:%s' % (type(bytesorstring), bytesorstring)) return tmp else: # Helper function to return string. # if 'str' or 'unicode' passed in they are returned unchanged # otherwise an exception is generated # Using this function is PY2/PY3 code should ensure in most cases # the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly # decodes the variable (see PY3 implementation of this function above) def get_string(bytesorstring): tmp = bytesorstring if not(isinstance(bytesorstring, str) or isinstance(bytesorstring, unicode)): raise ValueError('Expected str or unicode for %s:%s' % (type(bytesorstring), bytesorstring)) return tmp # sigh, python octal constants are a mess MODE_644 = int('644', 8) MODE_744 = int('744', 8) MODE_755 = int('755', 8) MODE_777 = int('777', 8) def conf(f): # override in order to propagate the argument "mandatory" def fun(*k, **kw): mandatory = True if 'mandatory' in kw: mandatory = kw['mandatory'] del kw['mandatory'] try: return f(*k, **kw) except Errors.ConfigurationError: if mandatory: raise fun.__name__ = f.__name__ if 'mandatory' in inspect.getsource(f): fun = f setattr(Configure.ConfigurationContext, f.__name__, fun) setattr(Build.BuildContext, f.__name__, fun) return f Configure.conf = conf Configure.conftest = conf @conf def SET_TARGET_TYPE(ctx, target, value): '''set the target type of a target''' cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') if target in cache and cache[target] != 'EMPTY': Logs.error("ERROR: Target '%s' in directory %s re-defined as %s - was %s" % (target, ctx.path.abspath(), value, cache[target])) sys.exit(1) LOCAL_CACHE_SET(ctx, 'TARGET_TYPE', target, value) debug("task_gen: Target '%s' created of type '%s' in %s" % (target, value, ctx.path.abspath())) return True def GET_TARGET_TYPE(ctx, target): '''get target type from cache''' cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') if not target in cache: return None return cache[target] def ADD_LD_LIBRARY_PATH(path): '''add something to LD_LIBRARY_PATH''' if 'LD_LIBRARY_PATH' in os.environ: oldpath = 
os.environ['LD_LIBRARY_PATH'] else: oldpath = '' newpath = oldpath.split(':') if not path in newpath: newpath.append(path) os.environ['LD_LIBRARY_PATH'] = ':'.join(newpath) def needs_private_lib(bld, target): '''return True if a target links to a private library''' for lib in getattr(target, "final_libs", []): t = bld.get_tgen_by_name(lib) if t and getattr(t, 'private_library', False): return True return False def install_rpath(target): '''the rpath value for installation''' bld = target.bld bld.env['RPATH'] = [] ret = set() if bld.env.RPATH_ON_INSTALL: ret.add(bld.EXPAND_VARIABLES(bld.env.LIBDIR)) if bld.env.RPATH_ON_INSTALL_PRIVATE and needs_private_lib(bld, target): ret.add(bld.EXPAND_VARIABLES(bld.env.PRIVATELIBDIR)) return list(ret) def build_rpath(bld): '''the rpath value for build''' rpaths = [os.path.normpath('%s/%s' % (bld.env.BUILD_DIRECTORY, d)) for d in ("shared", "shared/private")] bld.env['RPATH'] = [] if bld.env.RPATH_ON_BUILD: return rpaths for rpath in rpaths: ADD_LD_LIBRARY_PATH(rpath) return [] @conf def LOCAL_CACHE(ctx, name): '''return a named build cache dictionary, used to store state inside other functions''' if name in ctx.env: return ctx.env[name] ctx.env[name] = {} return ctx.env[name] @conf def LOCAL_CACHE_SET(ctx, cachename, key, value): '''set a value in a local cache''' cache = LOCAL_CACHE(ctx, cachename) cache[key] = value @conf def ASSERT(ctx, expression, msg): '''a build assert call''' if not expression: raise Errors.WafError("ERROR: %s\n" % msg) Build.BuildContext.ASSERT = ASSERT def SUBDIR(bld, subdir, list): '''create a list of files by pre-pending each with a subdir name''' ret = '' for l in TO_LIST(list): ret = ret + os.path.normpath(os.path.join(subdir, l)) + ' ' return ret Build.BuildContext.SUBDIR = SUBDIR def dict_concat(d1, d2): '''concatenate two dictionaries d1 += d2''' for t in d2: if t not in d1: d1[t] = d2[t] def ADD_COMMAND(opt, name, function): '''add a new top level command to waf''' Context.g_module.__dict__[name] = function opt.name = function Options.OptionsContext.ADD_COMMAND = ADD_COMMAND @feature('c', 'cc', 'cshlib', 'cprogram') @before('apply_core','exec_rule') def process_depends_on(self): '''The new depends_on attribute for build rules allow us to specify a dependency on output from a source generation rule''' if getattr(self , 'depends_on', None): lst = self.to_list(self.depends_on) for x in lst: y = self.bld.get_tgen_by_name(x) self.bld.ASSERT(y is not None, "Failed to find dependency %s of %s" % (x, self.name)) y.post() if getattr(y, 'more_includes', None): self.includes += " " + y.more_includes def unique_list(seq): '''return a uniquified list in the same order as the existing list''' seen = {} result = [] for item in seq: if item in seen: continue seen[item] = True result.append(item) return result def TO_LIST(str, delimiter=None): '''Split a list, preserving quoted strings and existing lists''' if str is None: return [] if isinstance(str, list): # we need to return a new independent list... 
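    # (list() gives a shallow, independent copy, so later appends by the
    # caller cannot mutate the list that was passed in)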
return list(str) if len(str) == 0: return [] lst = str.split(delimiter) # the string may have had quotes in it, now we # check if we did have quotes, and use the slower shlex # if we need to for e in lst: if e[0] == '"': return shlex.split(str) return lst def subst_vars_error(string, env): '''substitute vars, throw an error if a variable is not defined''' lst = re.split('(\$\{\w+\})', string) out = [] for v in lst: if re.match('\$\{\w+\}', v): vname = v[2:-1] if not vname in env: raise KeyError("Failed to find variable %s in %s in env %s <%s>" % (vname, string, env.__class__, str(env))) v = env[vname] if isinstance(v, list): v = ' '.join(v) out.append(v) return ''.join(out) @conf def SUBST_ENV_VAR(ctx, varname): '''Substitute an environment variable for any embedded variables''' return subst_vars_error(ctx.env[varname], ctx.env) Build.BuildContext.SUBST_ENV_VAR = SUBST_ENV_VAR def recursive_dirlist(dir, relbase, pattern=None): '''recursive directory list''' ret = [] for f in os.listdir(dir): f2 = dir + '/' + f if os.path.isdir(f2): ret.extend(recursive_dirlist(f2, relbase)) else: if pattern and not fnmatch.fnmatch(f, pattern): continue ret.append(os.path.relpath(f2, relbase)) return ret def symlink(src, dst, force=True): """Can create symlink by force""" try: os.symlink(src, dst) except OSError as exc: if exc.errno == errno.EEXIST and force: os.remove(dst) os.symlink(src, dst) else: raise def mkdir_p(dir): '''like mkdir -p''' if not dir: return if dir.endswith("/"): mkdir_p(dir[:-1]) return if os.path.isdir(dir): return mkdir_p(os.path.dirname(dir)) os.mkdir(dir) def SUBST_VARS_RECURSIVE(string, env): '''recursively expand variables''' if string is None: return string limit=100 while (string.find('${') != -1 and limit > 0): string = subst_vars_error(string, env) limit -= 1 return string @conf def EXPAND_VARIABLES(ctx, varstr, vars=None): '''expand variables from a user supplied dictionary This is most useful when you pass vars=locals() to expand all your local variables in strings ''' if isinstance(varstr, list): ret = [] for s in varstr: ret.append(EXPAND_VARIABLES(ctx, s, vars=vars)) return ret if not isinstance(varstr, str): return varstr env = ConfigSet.ConfigSet() ret = varstr # substitute on user supplied dict if avaiilable if vars is not None: for v in vars.keys(): env[v] = vars[v] ret = SUBST_VARS_RECURSIVE(ret, env) # if anything left, subst on the environment as well if ret.find('${') != -1: ret = SUBST_VARS_RECURSIVE(ret, ctx.env) # make sure there is nothing left. 
Also check for the common # typo of $( instead of ${ if ret.find('${') != -1 or ret.find('$(') != -1: Logs.error('Failed to substitute all variables in varstr=%s' % ret) sys.exit(1) return ret Build.BuildContext.EXPAND_VARIABLES = EXPAND_VARIABLES def RUN_COMMAND(cmd, env=None, shell=False): '''run a external command, return exit code or signal''' if env: cmd = SUBST_VARS_RECURSIVE(cmd, env) status = os.system(cmd) if os.WIFEXITED(status): return os.WEXITSTATUS(status) if os.WIFSIGNALED(status): return - os.WTERMSIG(status) Logs.error("Unknown exit reason %d for command: %s" % (status, cmd)) return -1 def RUN_PYTHON_TESTS(testfiles, pythonpath=None, extra_env=None): env = LOAD_ENVIRONMENT() if pythonpath is None: pythonpath = os.path.join(Context.g_module.out, 'python') result = 0 for interp in env.python_interpreters: if not isinstance(interp, str): interp = ' '.join(interp) for testfile in testfiles: cmd = "PYTHONPATH=%s %s %s" % (pythonpath, interp, testfile) if extra_env: for key, value in extra_env.items(): cmd = "%s=%s %s" % (key, value, cmd) print('Running Python test with %s: %s' % (interp, testfile)) ret = RUN_COMMAND(cmd) if ret: print('Python test failed: %s' % cmd) result = ret return result # make sure we have md5. some systems don't have it try: from hashlib import md5 # Even if hashlib.md5 exists, it may be unusable. # Try to use MD5 function. In FIPS mode this will cause an exception # and we'll get to the replacement code foo = md5(b'abcd') except: try: import md5 # repeat the same check here, mere success of import is not enough. # Try to use MD5 function. In FIPS mode this will cause an exception foo = md5.md5(b'abcd') except: Context.SIG_NIL = hash('abcd') class replace_md5(object): def __init__(self): self.val = None def update(self, val): self.val = hash((self.val, val)) def digest(self): return str(self.val) def hexdigest(self): return self.digest().encode('hex') def replace_h_file(filename): f = open(filename, 'rb') m = replace_md5() while (filename): filename = f.read(100000) m.update(filename) f.close() return m.digest() Utils.md5 = replace_md5 Task.md5 = replace_md5 Utils.h_file = replace_h_file def LOAD_ENVIRONMENT(): '''load the configuration environment, allowing access to env vars from new commands''' env = ConfigSet.ConfigSet() try: p = os.path.join(Context.g_module.out, 'c4che/default'+CACHE_SUFFIX) env.load(p) except (OSError, IOError): pass return env def IS_NEWER(bld, file1, file2): '''return True if file1 is newer than file2''' curdir = bld.path.abspath() t1 = os.stat(os.path.join(curdir, file1)).st_mtime t2 = os.stat(os.path.join(curdir, file2)).st_mtime return t1 > t2 Build.BuildContext.IS_NEWER = IS_NEWER @conf def RECURSE(ctx, directory): '''recurse into a directory, relative to the curdir or top level''' try: visited_dirs = ctx.visited_dirs except AttributeError: visited_dirs = ctx.visited_dirs = set() d = os.path.join(ctx.path.abspath(), directory) if os.path.exists(d): abspath = os.path.abspath(d) else: abspath = os.path.abspath(os.path.join(Context.g_module.top, directory)) ctxclass = ctx.__class__.__name__ key = ctxclass + ':' + abspath if key in visited_dirs: # already done it return visited_dirs.add(key) relpath = os.path.relpath(abspath, ctx.path.abspath()) if ctxclass in ['OptionsContext', 'ConfigurationContext', 'BuildContext', 'CleanContext', 'InstallContext', 'UninstallContext', 'ListContext', 'ClangDbContext']: return ctx.recurse(relpath) if 'waflib.extras.compat15' in sys.modules: return ctx.recurse(relpath) Logs.error('Unknown RECURSE 
context class: {}'.format(ctxclass)) raise Options.OptionsContext.RECURSE = RECURSE Build.BuildContext.RECURSE = RECURSE def CHECK_MAKEFLAGS(options): '''check for MAKEFLAGS environment variable in case we are being called from a Makefile try to honor a few make command line flags''' if not 'WAF_MAKE' in os.environ: return makeflags = os.environ.get('MAKEFLAGS') if makeflags is None: makeflags = "" jobs_set = False jobs = None # we need to use shlex.split to cope with the escaping of spaces # in makeflags for opt in shlex.split(makeflags): # options can come either as -x or as x if opt[0:2] == 'V=': options.verbose = Logs.verbose = int(opt[2:]) if Logs.verbose > 0: Logs.zones = ['runner'] if Logs.verbose > 2: Logs.zones = ['*'] elif opt[0].isupper() and opt.find('=') != -1: # this allows us to set waf options on the make command line # for example, if you do "make FOO=blah", then we set the # option 'FOO' in Options.options, to blah. If you look in wafsamba/wscript # you will see that the command line accessible options have their dest= # set to uppercase, to allow for passing of options from make in this way # this is also how "make test TESTS=testpattern" works, and # "make VERBOSE=1" as well as things like "make SYMBOLCHECK=1" loc = opt.find('=') setattr(options, opt[0:loc], opt[loc+1:]) elif opt[0] != '-': for v in opt: if re.search(r'j[0-9]*$', v): jobs_set = True jobs = opt.strip('j') elif v == 'k': options.keep = True elif re.search(r'-j[0-9]*$', opt): jobs_set = True jobs = opt.strip('-j') elif opt == '-k': options.keep = True if not jobs_set: # default to one job options.jobs = 1 elif jobs_set and jobs: options.jobs = int(jobs) waflib_options_parse_cmd_args = Options.OptionsContext.parse_cmd_args def wafsamba_options_parse_cmd_args(self, _args=None, cwd=None, allow_unknown=False): (options, commands, envvars) = \ waflib_options_parse_cmd_args(self, _args=_args, cwd=cwd, allow_unknown=allow_unknown) CHECK_MAKEFLAGS(options) if options.jobs == 1: # # waflib.Runner.Parallel processes jobs inline if the possible number # of jobs is just 1. But (at least in waf <= 2.0.12) it still calls # create a waflib.Runner.Spawner() which creates a single # waflib.Runner.Consumer() thread that tries to process jobs from the # queue. # # This has strange effects, which are not noticed typically, # but at least on AIX python has broken threading and fails # in random ways. # # So we just add a dummy Spawner class. 
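        # NoOpSpawner.__init__ below simply swallows the master argument,
        # so no consumer thread is ever started for the inline single-job path.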
class NoOpSpawner(object): def __init__(self, master): return from waflib import Runner Runner.Spawner = NoOpSpawner return options, commands, envvars Options.OptionsContext.parse_cmd_args = wafsamba_options_parse_cmd_args option_groups = {} def option_group(opt, name): '''find or create an option group''' global option_groups if name in option_groups: return option_groups[name] gr = opt.add_option_group(name) option_groups[name] = gr return gr Options.OptionsContext.option_group = option_group def save_file(filename, contents, create_dir=False): '''save data to a file''' if create_dir: mkdir_p(os.path.dirname(filename)) try: f = open(filename, 'w') f.write(contents) f.close() except: return False return True def load_file(filename): '''return contents of a file''' try: f = open(filename, 'r') r = f.read() f.close() except: return None return r def reconfigure(ctx): '''rerun configure if necessary''' if not os.path.exists(os.environ.get('WAFLOCK', '.lock-wscript')): raise Errors.WafError('configure has not been run') import samba_wildcard bld = samba_wildcard.fake_build_environment() Configure.autoconfig = True Scripting.check_configured(bld) def map_shlib_extension(ctx, name, python=False): '''map a filename with a shared library extension of .so to the real shlib name''' if name is None: return None if name[-1:].isdigit(): # some libraries have specified versions in the wscript rule return name (root1, ext1) = os.path.splitext(name) if python: return ctx.env.pyext_PATTERN % root1 else: (root2, ext2) = os.path.splitext(ctx.env.cshlib_PATTERN) return root1+ext2 Build.BuildContext.map_shlib_extension = map_shlib_extension def apply_pattern(filename, pattern): '''apply a filename pattern to a filename that may have a directory component''' dirname = os.path.dirname(filename) if not dirname: return pattern % filename basename = os.path.basename(filename) return os.path.join(dirname, pattern % basename) def make_libname(ctx, name, nolibprefix=False, version=None, python=False): """make a library filename Options: nolibprefix: don't include the lib prefix version : add a version number python : if we should use python module name conventions""" if python: libname = apply_pattern(name, ctx.env.pyext_PATTERN) else: libname = apply_pattern(name, ctx.env.cshlib_PATTERN) if nolibprefix and libname[0:3] == 'lib': libname = libname[3:] if version: if version[0] == '.': version = version[1:] (root, ext) = os.path.splitext(libname) if ext == ".dylib": # special case - version goes before the prefix libname = "%s.%s%s" % (root, version, ext) else: libname = "%s%s.%s" % (root, ext, version) return libname Build.BuildContext.make_libname = make_libname def get_tgt_list(bld): '''return a list of build objects for samba''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') # build a list of task generators we are interested in tgt_list = [] for tgt in targets: type = targets[tgt] if not type in ['SUBSYSTEM', 'MODULE', 'BINARY', 'LIBRARY', 'ASN1', 'PYTHON']: continue t = bld.get_tgen_by_name(tgt) if t is None: Logs.error("Target %s of type %s has no task generator" % (tgt, type)) sys.exit(1) tgt_list.append(t) return tgt_list from waflib.Context import WSCRIPT_FILE def PROCESS_SEPARATE_RULE(self, rule): ''' cause waf to process additional script based on `rule'. 
You should have file named wscript__rule in the current directory where stage is either 'configure' or 'build' ''' stage = '' if isinstance(self, Configure.ConfigurationContext): stage = 'configure' elif isinstance(self, Build.BuildContext): stage = 'build' file_path = os.path.join(self.path.abspath(), WSCRIPT_FILE+'_'+stage+'_'+rule) node = self.root.find_node(file_path) if node: try: cache = self.recurse_cache except AttributeError: cache = self.recurse_cache = {} if node not in cache: cache[node] = True self.pre_recurse(node) try: function_code = node.read('r', None) exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict) finally: self.post_recurse(node) Build.BuildContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE ConfigurationContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE def AD_DC_BUILD_IS_ENABLED(self): if self.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'): return True return False Build.BuildContext.AD_DC_BUILD_IS_ENABLED = AD_DC_BUILD_IS_ENABLED @feature('cprogram', 'cshlib', 'cstaticlib') @after('apply_lib_vars') @before('apply_obj_vars') def samba_before_apply_obj_vars(self): """before apply_obj_vars for uselib, this removes the standard paths""" def is_standard_libpath(env, path): for _path in env.STANDARD_LIBPATH: if _path == os.path.normpath(path): return True return False v = self.env for i in v['RPATH']: if is_standard_libpath(v, i): v['RPATH'].remove(i) for i in v['LIBPATH']: if is_standard_libpath(v, i): v['LIBPATH'].remove(i) # Samba options are mostly on by default (administrators and packagers # specify features to remove, not add), which is why default=True def samba_add_onoff_option(opt, option, help=(), dest=None, default=True, with_name="with", without_name="without"): if default is None: default_str = "auto" elif default is True: default_str = "yes" elif default is False: default_str = "no" else: default_str = str(default) if help == (): help = ("Build with %s support (default=%s)" % (option, default_str)) if dest is None: dest = "with_%s" % option.replace('-', '_') with_val = "--%s-%s" % (with_name, option) without_val = "--%s-%s" % (without_name, option) opt.add_option(with_val, help=help, action="store_true", dest=dest, default=default) opt.add_option(without_val, help=SUPPRESS_HELP, action="store_false", dest=dest) Options.OptionsContext.samba_add_onoff_option = samba_add_onoff_option ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/samba_version.py0000660000000000000000000002205600000000000022203 0ustar00rootroot00000000000000import os, sys from waflib import Utils, Context import samba_utils from samba_git import find_git def git_version_summary(path, env=None): git = find_git(env) if git is None: return ("GIT-UNKNOWN", {}) env.GIT = git environ = dict(os.environ) environ["GIT_DIR"] = '%s/.git' % path environ["GIT_WORK_TREE"] = path git = samba_utils.get_string(Utils.cmd_output(env.GIT + ' show --pretty=format:"%h%n%ct%n%H%n%cd" --stat HEAD', silent=True, env=environ)) lines = git.splitlines() if not lines or len(lines) < 4: return ("GIT-UNKNOWN", {}) fields = { "GIT_COMMIT_ABBREV": lines[0], "GIT_COMMIT_FULLREV": lines[2], "COMMIT_TIME": int(lines[1]), "COMMIT_DATE": lines[3], } ret = "GIT-" + fields["GIT_COMMIT_ABBREV"] if env.GIT_LOCAL_CHANGES: clean = Utils.cmd_output('%s diff HEAD | wc -l' % env.GIT, silent=True).strip() if clean == "0": fields["COMMIT_IS_CLEAN"] = 1 else: fields["COMMIT_IS_CLEAN"] = 0 ret += "+" return (ret, fields) def 
distversion_version_summary(path): #get version from .distversion file suffix = None fields = {} for line in Utils.readf(path + '/.distversion').splitlines(): if line == '': continue if line.startswith("#"): continue try: split_line = line.split("=") if split_line[1] != "": key = split_line[0] value = split_line[1] if key == "SUFFIX": suffix = value continue fields[key] = value except: print("Failed to parse line %s from .distversion file." % (line)) raise if "COMMIT_TIME" in fields: fields["COMMIT_TIME"] = int(fields["COMMIT_TIME"]) if suffix is None: return ("UNKNOWN", fields) return (suffix, fields) class SambaVersion(object): def __init__(self, version_dict, path, env=None, is_install=True): '''Determine the version number of samba See VERSION for the format. Entries on that file are also accepted as dictionary entries here ''' self.MAJOR=None self.MINOR=None self.RELEASE=None self.REVISION=None self.TP_RELEASE=None self.ALPHA_RELEASE=None self.BETA_RELEASE=None self.PRE_RELEASE=None self.RC_RELEASE=None self.IS_SNAPSHOT=True self.RELEASE_NICKNAME=None self.VENDOR_SUFFIX=None self.VENDOR_PATCH=None for a, b in version_dict.items(): if a.startswith("SAMBA_VERSION_"): setattr(self, a[14:], b) else: setattr(self, a, b) if self.IS_GIT_SNAPSHOT == "yes": self.IS_SNAPSHOT=True elif self.IS_GIT_SNAPSHOT == "no": self.IS_SNAPSHOT=False else: raise Exception("Unknown value for IS_GIT_SNAPSHOT: %s" % self.IS_GIT_SNAPSHOT) ## ## start with "3.0.22" ## self.MAJOR=int(self.MAJOR) self.MINOR=int(self.MINOR) self.RELEASE=int(self.RELEASE) SAMBA_VERSION_STRING = ("%u.%u.%u" % (self.MAJOR, self.MINOR, self.RELEASE)) ## ## maybe add "3.0.22a" or "4.0.0tp11" or "4.0.0alpha1" or "4.0.0beta1" or "3.0.22pre1" or "3.0.22rc1" ## We do not do pre or rc version on patch/letter releases ## if self.REVISION is not None: SAMBA_VERSION_STRING += self.REVISION if self.TP_RELEASE is not None: self.TP_RELEASE = int(self.TP_RELEASE) SAMBA_VERSION_STRING += "tp%u" % self.TP_RELEASE if self.ALPHA_RELEASE is not None: self.ALPHA_RELEASE = int(self.ALPHA_RELEASE) SAMBA_VERSION_STRING += ("alpha%u" % self.ALPHA_RELEASE) if self.BETA_RELEASE is not None: self.BETA_RELEASE = int(self.BETA_RELEASE) SAMBA_VERSION_STRING += ("beta%u" % self.BETA_RELEASE) if self.PRE_RELEASE is not None: self.PRE_RELEASE = int(self.PRE_RELEASE) SAMBA_VERSION_STRING += ("pre%u" % self.PRE_RELEASE) if self.RC_RELEASE is not None: self.RC_RELEASE = int(self.RC_RELEASE) SAMBA_VERSION_STRING += ("rc%u" % self.RC_RELEASE) if self.IS_SNAPSHOT: if not is_install: suffix = "DEVELOPERBUILD" self.vcs_fields = {} elif os.path.exists(os.path.join(path, ".git")): suffix, self.vcs_fields = git_version_summary(path, env=env) elif os.path.exists(os.path.join(path, ".distversion")): suffix, self.vcs_fields = distversion_version_summary(path) else: suffix = "UNKNOWN" self.vcs_fields = {} self.vcs_fields["SUFFIX"] = suffix SAMBA_VERSION_STRING += "-" + suffix else: self.vcs_fields = {} self.OFFICIAL_STRING = SAMBA_VERSION_STRING if self.VENDOR_SUFFIX is not None: SAMBA_VERSION_STRING += ("-" + self.VENDOR_SUFFIX) self.VENDOR_SUFFIX = self.VENDOR_SUFFIX if self.VENDOR_PATCH is not None: SAMBA_VERSION_STRING += ("-" + self.VENDOR_PATCH) self.VENDOR_PATCH = self.VENDOR_PATCH self.STRING = SAMBA_VERSION_STRING if self.RELEASE_NICKNAME is not None: self.STRING_WITH_NICKNAME = "%s (%s)" % (self.STRING, self.RELEASE_NICKNAME) else: self.STRING_WITH_NICKNAME = self.STRING def __str__(self): string="/* Autogenerated by waf */\n" +\ "#define SAMBA_VERSION_MAJOR %u\n" % 
self.MAJOR +\ "#define SAMBA_VERSION_MINOR %u\n" % self.MINOR +\ "#define SAMBA_VERSION_RELEASE %u\n" % self.RELEASE if self.REVISION is not None: string+="#define SAMBA_VERSION_REVISION %u\n" % self.REVISION if self.TP_RELEASE is not None: string+="#define SAMBA_VERSION_TP_RELEASE %u\n" % self.TP_RELEASE if self.ALPHA_RELEASE is not None: string+="#define SAMBA_VERSION_ALPHA_RELEASE %u\n" % self.ALPHA_RELEASE if self.BETA_RELEASE is not None: string+="#define SAMBA_VERSION_BETA_RELEASE %u\n" % self.BETA_RELEASE if self.PRE_RELEASE is not None: string+="#define SAMBA_VERSION_PRE_RELEASE %u\n" % self.PRE_RELEASE if self.RC_RELEASE is not None: string+="#define SAMBA_VERSION_RC_RELEASE %u\n" % self.RC_RELEASE for name in sorted(self.vcs_fields.keys()): string+="#define SAMBA_VERSION_%s " % name value = self.vcs_fields[name] string_types = str if sys.version_info[0] < 3: string_types = basestring if isinstance(value, string_types): string += "\"%s\"" % value elif type(value) is int: string += "%d" % value else: raise Exception("Unknown type for %s: %r" % (name, value)) string += "\n" string+="#define SAMBA_VERSION_OFFICIAL_STRING \"" + self.OFFICIAL_STRING + "\"\n" if self.VENDOR_SUFFIX is not None: string+="#define SAMBA_VERSION_VENDOR_SUFFIX " + self.VENDOR_SUFFIX + "\n" if self.VENDOR_PATCH is not None: string+="#define SAMBA_VERSION_VENDOR_PATCH " + self.VENDOR_PATCH + "\n" if self.RELEASE_NICKNAME is not None: string+="#define SAMBA_VERSION_RELEASE_NICKNAME " + self.RELEASE_NICKNAME + "\n" # We need to put this #ifdef in to the headers so that vendors can override the version with a function string+=''' #ifdef SAMBA_VERSION_VENDOR_FUNCTION # define SAMBA_VERSION_STRING SAMBA_VERSION_VENDOR_FUNCTION #else /* SAMBA_VERSION_VENDOR_FUNCTION */ # define SAMBA_VERSION_STRING "''' + self.STRING_WITH_NICKNAME + '''" #endif ''' string+="/* Version for mkrelease.sh: \nSAMBA_VERSION_STRING=" + self.STRING_WITH_NICKNAME + "\n */\n" return string def samba_version_file(version_file, path, env=None, is_install=True): '''Parse the version information from a VERSION file''' f = open(version_file, 'r') version_dict = {} for line in f: line = line.strip() if line == '': continue if line.startswith("#"): continue try: split_line = line.split("=") if split_line[1] != "": value = split_line[1].strip('"') version_dict[split_line[0]] = value except: print("Failed to parse line %s from %s" % (line, version_file)) raise return SambaVersion(version_dict, path, env=env, is_install=is_install) def load_version(env=None, is_install=True): '''load samba versions either from ./VERSION or git return a version object for detailed breakdown''' if not env: env = samba_utils.LOAD_ENVIRONMENT() version = samba_version_file("./VERSION", ".", env, is_install=is_install) Context.g_module.VERSION = version.STRING return version ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/samba_waf18.py0000660000000000000000000003412400000000000021443 0ustar00rootroot00000000000000# compatibility layer for building with more recent waf versions import os, shlex, sys from waflib import Build, Configure, Node, Utils, Options, Logs, TaskGen from waflib import ConfigSet from waflib.TaskGen import feature, after from waflib.Configure import conf, ConfigurationContext from waflib.Tools.flex import decide_ext # This version of flexfun runs in tsk.get_cwd() as opposed to the # bld.variant_dir: since input paths adjusted against tsk.get_cwd(), we have 
to # use tsk.get_cwd() for the work directory as well. def flexfun(tsk): env = tsk.env bld = tsk.generator.bld def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = lst = [] lst.extend(to_list(env.FLEX)) lst.extend(to_list(env.FLEXFLAGS)) inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] if env.FLEX_MSYS: inputs = [x.replace(os.sep, '/') for x in inputs] lst.extend(inputs) lst = [x for x in lst if x] txt = bld.cmd_and_log(lst, cwd=tsk.get_cwd(), env=env.env or None, quiet=0) tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 TaskGen.declare_chain( name = 'flex', rule = flexfun, # issue #854 ext_in = '.l', decider = decide_ext, ) Build.BuildContext.variant = 'default' Build.CleanContext.variant = 'default' Build.InstallContext.variant = 'default' Build.UninstallContext.variant = 'default' Build.ListContext.variant = 'default' def abspath(self, env=None): if env and hasattr(self, 'children'): return self.get_bld().abspath() return self.old_abspath() Node.Node.old_abspath = Node.Node.abspath Node.Node.abspath = abspath def bldpath(self, env=None): return self.abspath() #return self.path_from(self.ctx.bldnode.parent) Node.Node.bldpath = bldpath def srcpath(self, env=None): return self.abspath() #return self.path_from(self.ctx.bldnode.parent) Node.Node.srcpath = srcpath def store_fast(self, filename): file = open(filename, 'wb') data = self.get_merged_dict() try: Build.cPickle.dump(data, file, -1) finally: file.close() ConfigSet.ConfigSet.store_fast = store_fast def load_fast(self, filename): file = open(filename, 'rb') try: data = Build.cPickle.load(file) finally: file.close() self.table.update(data) ConfigSet.ConfigSet.load_fast = load_fast @feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') @after('propagate_uselib_vars', 'process_source') def apply_incpaths(self): lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES']) self.includes_nodes = lst cwdx = getattr(self.bld, 'cwdx', self.bld.bldnode) self.env['INCPATHS'] = [x.path_from(cwdx) for x in lst] @conf def define(self, key, val, quote=True, comment=None): assert key and isinstance(key, str) if val is None: val = () elif isinstance(val, bool): val = int(val) # waf 1.5 self.env[key] = val if isinstance(val, int) or isinstance(val, float): s = '%s=%s' else: s = quote and '%s="%s"' or '%s=%s' app = s % (key, str(val)) ban = key + '=' lst = self.env.DEFINES for x in lst: if x.startswith(ban): lst[lst.index(x)] = app break else: self.env.append_value('DEFINES', app) self.env.append_unique('define_key', key) # compat15 removes this but we want to keep it @conf def undefine(self, key, from_env=True, comment=None): assert key and isinstance(key, str) ban = key + '=' self.env.DEFINES = [x for x in self.env.DEFINES if not x.startswith(ban)] self.env.append_unique('define_key', key) # waf 1.5 if from_env: self.env[key] = () class ConfigurationContext(Configure.ConfigurationContext): def init_dirs(self): self.setenv('default') self.env.merge_config_header = True return super(ConfigurationContext, self).init_dirs() def find_program_samba(self, *k, **kw): # Override the waf default set in the @conf decorator in Configure.py if 'mandatory' not in kw: kw['mandatory'] = False ret = self.find_program_old(*k, **kw) return ret Configure.ConfigurationContext.find_program_old = Configure.ConfigurationContext.find_program Configure.ConfigurationContext.find_program = find_program_samba Build.BuildContext.ENFORCE_GROUP_ORDERING = Utils.nada 
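# AUTOCLEANUP_STALE_FILES is likewise a no-op by default; stale_files.py in
# this directory rebinds it to a real implementation when stale-file cleanup
# is wanted.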
Build.BuildContext.AUTOCLEANUP_STALE_FILES = Utils.nada @conf def check(self, *k, **kw): '''Override the waf defaults to inject --with-directory options''' # match the configuration test with speficic options, for example: # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" self.validate_c(kw) additional_dirs = [] if 'msg' in kw: msg = kw['msg'] for x in Options.OptionsContext.parser.parser.option_list: if getattr(x, 'match', None) and msg in x.match: d = getattr(Options.options, x.dest, '') if d: additional_dirs.append(d) # we add the additional dirs twice: once for the test data, and again if the compilation test suceeds below def add_options_dir(dirs, env): for x in dirs: if not x in env.CPPPATH: env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH if not x in env.LIBPATH: env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH add_options_dir(additional_dirs, kw['env']) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret # success! time for brandy add_options_dir(additional_dirs, self.env) ret = self.post_check(*k, **kw) if not ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret @conf def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None): '''see if the platform supports building libraries''' if msg is None: if rpath: msg = "rpath library support" else: msg = "building library support" def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('int lib_func(void) { return 42; }\n', 'w') main_node = bld.srcnode.make_node('main.c') main_node.write('int main(void) {return !(lib_func() == 42);}', 'w') linkflags = [] if version_script: script = bld.srcnode.make_node('ldscript') script.write('TEST_1.0A2 { global: *; };\n', 'w') linkflags.append('-Wl,--version-script=%s' % script.abspath()) bld(features='c cshlib', source=lib_node, target='lib1', linkflags=linkflags, name='lib1') o = bld(features='c cprogram', source=main_node, target='prog1', uselib_local='lib1') if rpath: o.rpath = [lib_node.parent.abspath()] def run_app(self): args = conf.SAMBA_CROSS_ARGS(msg=msg) env = dict(os.environ) env['LD_LIBRARY_PATH'] = self.inputs[0].parent.abspath() + os.pathsep + env.get('LD_LIBRARY_PATH', '') self.generator.bld.cmd_and_log([self.inputs[0].abspath()] + args, env=env) o.post() bld(rule=run_app, source=o.link_task.outputs[0]) # ok, so it builds try: conf.check(build_fun=build, msg='Checking for %s' % msg) except conf.errors.ConfigurationError: return False return True @conf def CHECK_NEED_LC(conf, msg): '''check if we need -lc''' def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('#include \nint lib_func(void) { FILE *f = fopen("foo", "r");}\n', 'w') bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') try: conf.check(build_fun=build, msg=msg, okmsg='-lc is unnecessary', errmsg='-lc is necessary') except conf.errors.ConfigurationError: return False return True # already implemented on "waf -v" def order(bld, tgt_list): return True Build.BuildContext.check_group_ordering = order @conf def CHECK_CFG(self, *k, **kw): if 'args' in kw: kw['args'] = shlex.split(kw['args']) if not 'mandatory' in kw: 
kw['mandatory'] = False kw['global_define'] = True return self.check_cfg(*k, **kw) def cmd_output(cmd, **kw): silent = False if 'silent' in kw: silent = kw['silent'] del(kw['silent']) if 'e' in kw: tmp = kw['e'] del(kw['e']) kw['env'] = tmp kw['shell'] = isinstance(cmd, str) kw['stdout'] = Utils.subprocess.PIPE if silent: kw['stderr'] = Utils.subprocess.PIPE try: p = Utils.subprocess.Popen(cmd, **kw) output = p.communicate()[0] except OSError as e: raise ValueError(str(e)) if p.returncode: if not silent: msg = "command execution failed: %s -> %r" % (cmd, str(output)) raise ValueError(msg) output = '' return output Utils.cmd_output = cmd_output @TaskGen.feature('c', 'cxx', 'd') @TaskGen.before('apply_incpaths', 'propagate_uselib_vars') @TaskGen.after('apply_link', 'process_source') def apply_uselib_local(self): """ process the uselib_local attribute execute after apply_link because of the execution order set on 'link_task' """ env = self.env from waflib.Tools.ccroot import stlink_task # 1. the case of the libs defined in the project (visit ancestors first) # the ancestors external libraries (uselib) will be prepended self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'uselib_local', [])) get = self.bld.get_tgen_by_name seen = set() seen_uselib = set() tmp = Utils.deque(names) # consume a copy of the list of names if tmp: if Logs.verbose: Logs.warn('compat: "uselib_local" is deprecated, replace by "use"') while tmp: lib_name = tmp.popleft() # visit dependencies only once if lib_name in seen: continue y = get(lib_name) y.post() seen.add(lib_name) # object has ancestors to process (shared libraries): add them to the end of the list if getattr(y, 'uselib_local', None): for x in self.to_list(getattr(y, 'uselib_local', [])): obj = get(x) obj.post() if getattr(obj, 'link_task', None): if not isinstance(obj.link_task, stlink_task): tmp.append(x) # link task and flags if getattr(y, 'link_task', None): link_name = y.target[y.target.rfind(os.sep) + 1:] if isinstance(y.link_task, stlink_task): env.append_value('STLIB', [link_name]) else: # some linkers can link against programs env.append_value('LIB', [link_name]) # the order self.link_task.set_run_after(y.link_task) # for the recompilation self.link_task.dep_nodes += y.link_task.outputs # add the link path too tmp_path = y.link_task.outputs[0].parent.bldpath() if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', [tmp_path]) # add ancestors uselib too - but only propagate those that have no staticlib defined for v in self.to_list(getattr(y, 'uselib', [])): if v not in seen_uselib: seen_uselib.add(v) if not env['STLIB_' + v]: if not v in self.uselib: self.uselib.insert(0, v) # if the library task generator provides 'export_includes', add to the include path # the export_includes must be a list of paths relative to the other library if getattr(y, 'export_includes', None): self.includes.extend(y.to_incnodes(y.export_includes)) @TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib') @TaskGen.after('apply_link') def apply_objdeps(self): "add the .o files produced by some other object files in the same manner as uselib_local" names = getattr(self, 'add_objects', []) if not names: return names = self.to_list(names) get = self.bld.get_tgen_by_name seen = [] while names: x = names[0] # visit dependencies only once if x in seen: names = names[1:] continue # object does not exist ? 
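        # (get() is bld.get_tgen_by_name, bound above; resolve the named
        # dependency before walking its own add_objects list)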
y = get(x) # object has ancestors to process first ? update the list of names if getattr(y, 'add_objects', None): added = 0 lst = y.to_list(y.add_objects) lst.reverse() for u in lst: if u in seen: continue added = 1 names = [u]+names if added: continue # list of names modified, loop # safe to process the current object y.post() seen.append(x) for t in getattr(y, 'compiled_tasks', []): self.link_task.inputs.extend(t.outputs) @TaskGen.after('apply_link') def process_obj_files(self): if not hasattr(self, 'obj_files'): return for x in self.obj_files: node = self.path.find_resource(x) self.link_task.inputs.append(node) @TaskGen.taskgen_method def add_obj_file(self, file): """Small example on how to link object files as if they were source obj = bld.create_obj('cc') obj.add_obj_file('foo.o')""" if not hasattr(self, 'obj_files'): self.obj_files = [] if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files') self.obj_files.append(file) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/samba_wildcard.py0000660000000000000000000001061100000000000022301 0ustar00rootroot00000000000000# based on playground/evil in the waf svn tree import os, datetime, fnmatch from waflib import Scripting, Utils, Options, Logs, Errors from waflib import ConfigSet, Context from samba_utils import LOCAL_CACHE def run_task(t, k): '''run a single build task''' ret = t.run() if ret: raise Errors.WafError("Failed to build %s: %u" % (k, ret)) def run_named_build_task(cmd): '''run a named build task, matching the cmd name using fnmatch wildcards against inputs and outputs of all build tasks''' bld = fake_build_environment(info=False) found = False cwd_node = bld.root.find_dir(os.getcwd()) top_node = bld.root.find_dir(bld.srcnode.abspath()) cmd = os.path.normpath(cmd) # cope with builds of bin/*/* if os.path.islink(cmd): cmd = os.path.relpath(os.readlink(cmd), os.getcwd()) if cmd[0:12] == "bin/default/": cmd = cmd[12:] for g in bld.task_manager.groups: for attr in ['outputs', 'inputs']: for t in g.tasks: s = getattr(t, attr, []) for k in s: relpath1 = k.relpath_gen(cwd_node) relpath2 = k.relpath_gen(top_node) if (fnmatch.fnmatch(relpath1, cmd) or fnmatch.fnmatch(relpath2, cmd)): t.position = [0,0] print(t.display()) run_task(t, k) found = True if not found: raise Errors.WafError("Unable to find build target matching %s" % cmd) def rewrite_compile_targets(): '''cope with the bin/ form of compile target''' if not Options.options.compile_targets: return bld = fake_build_environment(info=False) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') tlist = [] for t in Options.options.compile_targets.split(','): if not os.path.islink(t): tlist.append(t) continue link = os.readlink(t) list = link.split('/') for name in [list[-1], '/'.join(list[-2:])]: if name in targets: tlist.append(name) continue Options.options.compile_targets = ",".join(tlist) def wildcard_main(missing_cmd_fn): '''this replaces main from Scripting, allowing us to override the behaviour for unknown commands If a unknown command is found, then missing_cmd_fn() is called with the name of the requested command ''' Scripting.commands = Options.arg_line[:] # rewrite the compile targets to cope with the bin/xx form rewrite_compile_targets() while Scripting.commands: x = Scripting.commands.pop(0) ini = datetime.datetime.now() if x == 'configure': fun = Scripting.configure elif x == 'build': fun = Scripting.build else: fun = getattr(Utils.g_module, x, None) # this is 
the new addition on top of main from Scripting.py if not fun: missing_cmd_fn(x) break ctx = getattr(Utils.g_module, x + '_context', Utils.Context)() if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']: try: fun(ctx) except TypeError: fun() else: fun(ctx) ela = '' if not Options.options.progress_bar: ela = ' (%s)' % Utils.get_elapsed_time(ini) if x != 'init' and x != 'shutdown': Logs.info('%r finished successfully%s' % (x, ela)) if not Scripting.commands and x != 'shutdown': Scripting.commands.append('shutdown') def fake_build_environment(info=True, flush=False): """create all the tasks for the project, but do not run the build return the build context in use""" bld = getattr(Context.g_module, 'build_context', Utils.Context)() bld = Scripting.check_configured(bld) Options.commands['install'] = False Options.commands['uninstall'] = False bld.is_install = 0 # False try: proj = ConfigSet.ConfigSet(Options.lockfile) except IOError: raise Errors.WafError("Project not configured (run './configure' first)") bld.load_envs() if info: Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath()) bld.add_subdirs([os.path.split(Context.g_module.root_path)[0]]) bld.pre_build() if flush: bld.flush() return bld ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1579615787.857205 tevent-0.11.0/buildtools/wafsamba/stale_files.py0000660000000000000000000000771700000000000021654 0ustar00rootroot00000000000000# encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Add a pre-build hook to remove all build files which do not have a corresponding target This can be used for example to remove the targets that have changed name without performing a full 'waf clean' Of course, it will only work if there are no dynamically generated nodes/tasks, in which case the method will have to be modified to exclude some folders for example. 
""" from waflib import Logs, Build, Options, Utils, Errors import os from wafsamba import samba_utils from Runner import Parallel old_refill_task_list = Parallel.refill_task_list def replace_refill_task_list(self): '''replacement for refill_task_list() that deletes stale files''' iit = old_refill_task_list(self) bld = self.bld if not getattr(bld, 'new_rules', False): # we only need to check for stale files if the build rules changed return iit if Options.options.compile_targets: # not safe when --target is used return iit # execute only once if getattr(self, 'cleanup_done', False): return iit self.cleanup_done = True def group_name(g): tm = self.bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] bin_base = bld.bldnode.abspath() bin_base_len = len(bin_base) # paranoia if bin_base[-4:] != '/bin': raise Errors.WafError("Invalid bin base: %s" % bin_base) # obtain the expected list of files expected = [] for i in range(len(bld.task_manager.groups)): g = bld.task_manager.groups[i] tasks = g.tasks_gen for x in tasks: try: if getattr(x, 'target'): tlist = samba_utils.TO_LIST(getattr(x, 'target')) ttype = getattr(x, 'samba_type', None) task_list = getattr(x, 'compiled_tasks', []) if task_list: # this gets all of the .o files, including the task # ids, so foo.c maps to foo_3.o for idx=3 for tsk in task_list: for output in tsk.outputs: objpath = os.path.normpath(output.abspath(bld.env)) expected.append(objpath) for t in tlist: if ttype in ['LIBRARY','MODULE']: t = samba_utils.apply_pattern(t, bld.env.shlib_PATTERN) if ttype == 'PYTHON': t = samba_utils.apply_pattern(t, bld.env.pyext_PATTERN) p = os.path.join(x.path.abspath(bld.env), t) p = os.path.normpath(p) expected.append(p) for n in x.allnodes: p = n.abspath(bld.env) if p[0:bin_base_len] == bin_base: expected.append(p) except: pass for root, dirs, files in os.walk(bin_base): for f in files: p = root + '/' + f if os.path.islink(p): link = os.readlink(p) if link[0:bin_base_len] == bin_base: p = link if f in ['config.h']: continue (froot, fext) = os.path.splitext(f) if fext not in [ '.c', '.h', '.so', '.o' ]: continue if f[-7:] == '.inst.h': continue if p.find("/.conf") != -1: continue if not p in expected and os.path.exists(p): Logs.warn("Removing stale file: %s" % p) os.unlink(p) return iit def AUTOCLEANUP_STALE_FILES(bld): """automatically clean up any files in bin that shouldn't be there""" old_refill_task_list = Parallel.refill_task_list Parallel.refill_task_list = replace_refill_task_list Parallel.bld = bld Build.BuildContext.AUTOCLEANUP_STALE_FILES = AUTOCLEANUP_STALE_FILES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/symbols.py0000660000000000000000000005334600000000000021051 0ustar00rootroot00000000000000# a waf tool to extract symbols from object files or libraries # using nm, producing a set of exposed defined/undefined symbols import os, re, subprocess from waflib import Utils, Build, Options, Logs, Errors from waflib.Logs import debug from samba_utils import TO_LIST, LOCAL_CACHE, get_tgt_list # these are the data structures used in symbols.py: # # bld.env.symbol_map : dictionary mapping public symbol names to list of # subsystem names where that symbol exists # # t.in_library : list of libraries that t is in # # bld.env.public_symbols: set of public symbols for each subsystem # bld.env.used_symbols : set of used symbols for each subsystem # # bld.env.syslib_symbols: dictionary mapping system 
library name to set of symbols # for that library # bld.env.library_dict : dictionary mapping built library paths to subsystem names # # LOCAL_CACHE(bld, 'TARGET_TYPE') : dictionary mapping subsystem name to target type def symbols_extract(bld, objfiles, dynamic=False): '''extract symbols from objfile, returning a dictionary containing the set of undefined and public symbols for each file''' ret = {} # see if we can get some results from the nm cache if not bld.env.nm_cache: bld.env.nm_cache = {} objfiles = set(objfiles).copy() remaining = set() for obj in objfiles: if obj in bld.env.nm_cache: ret[obj] = bld.env.nm_cache[obj].copy() else: remaining.add(obj) objfiles = remaining if len(objfiles) == 0: return ret cmd = ["nm"] if dynamic: # needed for some .so files cmd.append("-D") cmd.extend(list(objfiles)) nmpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout if len(objfiles) == 1: filename = list(objfiles)[0] ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set()} for line in nmpipe: line = line.strip() if line.endswith(b':'): filename = line[:-1] ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set() } continue cols = line.split(b" ") if cols == [b'']: continue # see if the line starts with an address if len(cols) == 3: symbol_type = cols[1] symbol = cols[2] else: symbol_type = cols[0] symbol = cols[1] if symbol_type in b"BDGTRVWSi": # its a public symbol ret[filename]["PUBLIC"].add(symbol) elif symbol_type in b"U": ret[filename]["UNDEFINED"].add(symbol) # add to the cache for obj in objfiles: if obj in ret: bld.env.nm_cache[obj] = ret[obj].copy() else: bld.env.nm_cache[obj] = { "PUBLIC": set(), "UNDEFINED" : set() } return ret def real_name(name): if name.find(".objlist") != -1: name = name[:-8] return name def find_ldd_path(bld, libname, binary): '''find the path to the syslib we will link against''' ret = None if not bld.env.syslib_paths: bld.env.syslib_paths = {} if libname in bld.env.syslib_paths: return bld.env.syslib_paths[libname] lddpipe = subprocess.Popen(['ldd', binary], stdout=subprocess.PIPE).stdout for line in lddpipe: line = line.strip() cols = line.split(b" ") if len(cols) < 3 or cols[1] != b"=>": continue if cols[0].startswith(b"libc."): # save this one too bld.env.libc_path = cols[2] if cols[0].startswith(libname): ret = cols[2] bld.env.syslib_paths[libname] = ret return ret # some regular expressions for parsing readelf output re_sharedlib = re.compile(b'Shared library: \[(.*)\]') # output from readelf could be `Library rpath` or `Libray runpath` re_rpath = re.compile(b'Library (rpath|runpath): \[(.*)\]') def get_libs(bld, binname): '''find the list of linked libraries for any binary or library binname is the path to the binary/library on disk We do this using readelf instead of ldd as we need to avoid recursing into system libraries ''' # see if we can get the result from the ldd cache if not bld.env.lib_cache: bld.env.lib_cache = {} if binname in bld.env.lib_cache: return bld.env.lib_cache[binname].copy() rpath = [] libs = set() elfpipe = subprocess.Popen(['readelf', '--dynamic', binname], stdout=subprocess.PIPE).stdout for line in elfpipe: m = re_sharedlib.search(line) if m: libs.add(m.group(1)) m = re_rpath.search(line) if m: # output from Popen is always bytestr even in py3 rpath.extend(m.group(2).split(b":")) ret = set() for lib in libs: found = False for r in rpath: path = os.path.join(r, lib) if os.path.exists(path): ret.add(os.path.realpath(path)) found = True break if not found: # we didn't find this lib using rpath. 
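# Illustrative sketch (not from the Samba tree): what symbols_extract()
# above does with a single line of `nm` output.  Symbol types in
# "BDGTRVWSi" count as public definitions and "U" as undefined
# references; everything else is ignored.  Stand-alone helper with a
# hypothetical name:
def classify_nm_line(line):
    '''return ("PUBLIC"|"UNDEFINED"|None, symbol) for one nm output line'''
    cols = line.strip().split(b" ")
    if cols == [b'']:
        return None, None
    if len(cols) == 3:
        # address, type, name
        symbol_type, symbol = cols[1], cols[2]
    else:
        # type, name (undefined symbols have no address column)
        symbol_type, symbol = cols[0], cols[1]
    if symbol_type in b"BDGTRVWSi":
        return "PUBLIC", symbol
    if symbol_type in b"U":
        return "UNDEFINED", symbol
    return None, symbol

# e.g. classify_nm_line(b"0000000000001159 T tevent_loop_once")
#      -> ("PUBLIC", b"tevent_loop_once")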
It is probably a system # library, so to find the path to it we either need to use ldd # or we need to start parsing /etc/ld.so.conf* ourselves. We'll # use ldd for now, even though it is slow path = find_ldd_path(bld, lib, binname) if path: ret.add(os.path.realpath(path)) bld.env.lib_cache[binname] = ret.copy() return ret def get_libs_recursive(bld, binname, seen): '''find the recursive list of linked libraries for any binary or library binname is the path to the binary/library on disk. seen is a set used to prevent loops ''' if binname in seen: return set() ret = get_libs(bld, binname) seen.add(binname) for lib in ret: # we don't want to recurse into system libraries. If a system # library that we use (eg. libcups) happens to use another library # (such as libkrb5) which contains common symbols with our own # libraries, then that is not an error if lib in bld.env.library_dict: ret = ret.union(get_libs_recursive(bld, lib, seen)) return ret def find_syslib_path(bld, libname, deps): '''find the path to the syslib we will link against''' # the strategy is to use the targets that depend on the library, and run ldd # on it to find the real location of the library that is used linkpath = deps[0].link_task.outputs[0].abspath(bld.env) if libname == "python": libname += bld.env.PYTHON_VERSION return find_ldd_path(bld, "lib%s" % libname.lower(), linkpath) def build_symbol_sets(bld, tgt_list): '''build the public_symbols and undefined_symbols attributes for each target''' if bld.env.public_symbols: return objlist = [] # list of object file objmap = {} # map from object filename to target (subsystem) name for t in tgt_list: t.public_symbols = set() t.undefined_symbols = set() t.used_symbols = set() for tsk in getattr(t, 'compiled_tasks', []): for output in tsk.outputs: objpath = output.abspath(bld.env) objlist.append(objpath) objmap[objpath] = t symbols = symbols_extract(bld, objlist) for obj in objlist: t = objmap[obj] t.public_symbols = t.public_symbols.union(symbols[obj]["PUBLIC"]) t.undefined_symbols = t.undefined_symbols.union(symbols[obj]["UNDEFINED"]) t.used_symbols = t.used_symbols.union(symbols[obj]["UNDEFINED"]) t.undefined_symbols = t.undefined_symbols.difference(t.public_symbols) # and the reverse map of public symbols to subsystem name bld.env.symbol_map = {} for t in tgt_list: for s in t.public_symbols: if not s in bld.env.symbol_map: bld.env.symbol_map[s] = [] bld.env.symbol_map[s].append(real_name(t.sname)) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') bld.env.public_symbols = {} for t in tgt_list: name = real_name(t.sname) if name in bld.env.public_symbols: bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t.public_symbols) else: bld.env.public_symbols[name] = t.public_symbols if t.samba_type == 'LIBRARY': for dep in t.add_objects: t2 = bld.get_tgen_by_name(dep) bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t2.public_symbols) bld.env.used_symbols = {} for t in tgt_list: name = real_name(t.sname) if name in bld.env.used_symbols: bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t.used_symbols) else: bld.env.used_symbols[name] = t.used_symbols if t.samba_type == 'LIBRARY': for dep in t.add_objects: t2 = bld.get_tgen_by_name(dep) bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t2.used_symbols) def build_library_dict(bld, tgt_list): '''build the library_dict 
dictionary''' if bld.env.library_dict: return bld.env.library_dict = {} for t in tgt_list: if t.samba_type in [ 'LIBRARY', 'PYTHON' ]: linkpath = os.path.realpath(t.link_task.outputs[0].abspath(bld.env)) bld.env.library_dict[linkpath] = t.sname def build_syslib_sets(bld, tgt_list): '''build the public_symbols for all syslibs''' if bld.env.syslib_symbols: return # work out what syslibs we depend on, and what targets those are used in syslibs = {} objmap = {} for t in tgt_list: if getattr(t, 'uselib', []) and t.samba_type in [ 'LIBRARY', 'BINARY', 'PYTHON' ]: for lib in t.uselib: if lib in ['PYEMBED', 'PYEXT']: lib = "python" if not lib in syslibs: syslibs[lib] = [] syslibs[lib].append(t) # work out the paths to each syslib syslib_paths = [] for lib in syslibs: path = find_syslib_path(bld, lib, syslibs[lib]) if path is None: Logs.warn("Unable to find syslib path for %s" % lib) if path is not None: syslib_paths.append(path) objmap[path] = lib.lower() # add in libc syslib_paths.append(bld.env.libc_path) objmap[bld.env.libc_path] = 'c' symbols = symbols_extract(bld, syslib_paths, dynamic=True) # keep a map of syslib names to public symbols bld.env.syslib_symbols = {} for lib in symbols: bld.env.syslib_symbols[lib] = symbols[lib]["PUBLIC"] # add to the map of symbols to dependencies for lib in symbols: for sym in symbols[lib]["PUBLIC"]: if not sym in bld.env.symbol_map: bld.env.symbol_map[sym] = [] bld.env.symbol_map[sym].append(objmap[lib]) # keep the libc symbols as well, as these are useful for some of the # sanity checks bld.env.libc_symbols = symbols[bld.env.libc_path]["PUBLIC"] # add to the combined map of dependency name to public_symbols for lib in bld.env.syslib_symbols: bld.env.public_symbols[objmap[lib]] = bld.env.syslib_symbols[lib] def build_autodeps(bld, t): '''build the set of dependencies for a target''' deps = set() name = real_name(t.sname) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for sym in t.undefined_symbols: if sym in t.public_symbols: continue if sym in bld.env.symbol_map: depname = bld.env.symbol_map[sym] if depname == [ name ]: # self dependencies aren't interesting continue if t.in_library == depname: # no need to depend on the library we are part of continue if depname[0] in ['c', 'python']: # these don't go into autodeps continue if targets[depname[0]] in [ 'SYSLIB' ]: deps.add(depname[0]) continue t2 = bld.get_tgen_by_name(depname[0]) if len(t2.in_library) != 1: deps.add(depname[0]) continue if t2.in_library == t.in_library: # if we're part of the same library, we don't need to autodep continue deps.add(t2.in_library[0]) t.autodeps = deps def build_library_names(bld, tgt_list): '''add a in_library attribute to all targets that are part of a library''' if bld.env.done_build_library_names: return for t in tgt_list: t.in_library = [] for t in tgt_list: if t.samba_type in [ 'LIBRARY' ]: for obj in t.samba_deps_extended: t2 = bld.get_tgen_by_name(obj) if t2 and t2.samba_type in [ 'SUBSYSTEM', 'ASN1' ]: if not t.sname in t2.in_library: t2.in_library.append(t.sname) bld.env.done_build_library_names = True def check_library_deps(bld, t): '''check that all the autodeps that have mutual dependency of this target are in the same library as the target''' name = real_name(t.sname) if len(t.in_library) > 1: Logs.warn("WARNING: Target '%s' in multiple libraries: %s" % (t.sname, t.in_library)) for dep in t.autodeps: t2 = bld.get_tgen_by_name(dep) if t2 is None: continue for dep2 in t2.autodeps: if dep2 == name and t.in_library != t2.in_library: Logs.warn("WARNING: mutual dependency 
%s <=> %s" % (name, real_name(t2.sname))) Logs.warn("Libraries should match. %s != %s" % (t.in_library, t2.in_library)) # raise Errors.WafError("illegal mutual dependency") def check_syslib_collisions(bld, tgt_list): '''check if a target has any symbol collisions with a syslib We do not want any code in Samba to use a symbol name from a system library. The chance of that causing problems is just too high. Note that libreplace uses a rep_XX approach of renaming symbols via macros ''' has_error = False for t in tgt_list: for lib in bld.env.syslib_symbols: common = t.public_symbols.intersection(bld.env.syslib_symbols[lib]) if common: Logs.error("ERROR: Target '%s' has symbols '%s' which is also in syslib '%s'" % (t.sname, common, lib)) has_error = True if has_error: raise Errors.WafError("symbols in common with system libraries") def check_dependencies(bld, t): '''check for depenencies that should be changed''' if bld.get_tgen_by_name(t.sname + ".objlist"): return targets = LOCAL_CACHE(bld, 'TARGET_TYPE') remaining = t.undefined_symbols.copy() remaining = remaining.difference(t.public_symbols) sname = real_name(t.sname) deps = set(t.samba_deps) for d in t.samba_deps: if targets[d] in [ 'EMPTY', 'DISABLED', 'SYSLIB', 'GENERATOR' ]: continue bld.ASSERT(d in bld.env.public_symbols, "Failed to find symbol list for dependency '%s'" % d) diff = remaining.intersection(bld.env.public_symbols[d]) if not diff and targets[sname] != 'LIBRARY': Logs.info("Target '%s' has no dependency on %s" % (sname, d)) else: remaining = remaining.difference(diff) t.unsatisfied_symbols = set() needed = {} for sym in remaining: if sym in bld.env.symbol_map: dep = bld.env.symbol_map[sym] if not dep[0] in needed: needed[dep[0]] = set() needed[dep[0]].add(sym) else: t.unsatisfied_symbols.add(sym) for dep in needed: Logs.info("Target '%s' should add dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep]))) def check_syslib_dependencies(bld, t): '''check for syslib depenencies''' if bld.get_tgen_by_name(t.sname + ".objlist"): return sname = real_name(t.sname) remaining = set() features = TO_LIST(t.features) if 'pyembed' in features or 'pyext' in features: if 'python' in bld.env.public_symbols: t.unsatisfied_symbols = t.unsatisfied_symbols.difference(bld.env.public_symbols['python']) needed = {} for sym in t.unsatisfied_symbols: if sym in bld.env.symbol_map: dep = bld.env.symbol_map[sym][0] if dep == 'c': continue if not dep in needed: needed[dep] = set() needed[dep].add(sym) else: remaining.add(sym) for dep in needed: Logs.info("Target '%s' should add syslib dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep]))) if remaining: debug("deps: Target '%s' has unsatisfied symbols: %s" % (sname, " ".join(remaining))) def symbols_symbolcheck(task): '''check the internal dependency lists''' bld = task.env.bld tgt_list = get_tgt_list(bld) build_symbol_sets(bld, tgt_list) build_library_names(bld, tgt_list) for t in tgt_list: t.autodeps = set() if getattr(t, 'source', ''): build_autodeps(bld, t) for t in tgt_list: check_dependencies(bld, t) for t in tgt_list: check_library_deps(bld, t) def symbols_syslibcheck(task): '''check the syslib dependencies''' bld = task.env.bld tgt_list = get_tgt_list(bld) build_syslib_sets(bld, tgt_list) check_syslib_collisions(bld, tgt_list) for t in tgt_list: check_syslib_dependencies(bld, t) def symbols_whyneeded(task): """check why 'target' needs to link to 'subsystem'""" bld = task.env.bld tgt_list = get_tgt_list(bld) why = Options.options.WHYNEEDED.split(":") if len(why) != 2: raise 
Errors.WafError("usage: WHYNEEDED=TARGET:DEPENDENCY") target = why[0] subsystem = why[1] build_symbol_sets(bld, tgt_list) build_library_names(bld, tgt_list) build_syslib_sets(bld, tgt_list) Logs.info("Checking why %s needs to link to %s" % (target, subsystem)) if not target in bld.env.used_symbols: Logs.warn("unable to find target '%s' in used_symbols dict" % target) return if not subsystem in bld.env.public_symbols: Logs.warn("unable to find subsystem '%s' in public_symbols dict" % subsystem) return overlap = bld.env.used_symbols[target].intersection(bld.env.public_symbols[subsystem]) if not overlap: Logs.info("target '%s' doesn't use any public symbols from '%s'" % (target, subsystem)) else: Logs.info("target '%s' uses symbols %s from '%s'" % (target, overlap, subsystem)) def report_duplicate(bld, binname, sym, libs, fail_on_error): '''report duplicated symbols''' if sym in ['_init', '_fini', '_edata', '_end', '__bss_start']: return libnames = [] for lib in libs: if lib in bld.env.library_dict: libnames.append(bld.env.library_dict[lib]) else: libnames.append(lib) if fail_on_error: raise Errors.WafError("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) else: print("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) def symbols_dupcheck_binary(bld, binname, fail_on_error): '''check for duplicated symbols in one binary''' libs = get_libs_recursive(bld, binname, set()) symlist = symbols_extract(bld, libs, dynamic=True) symmap = {} for libpath in symlist: for sym in symlist[libpath]['PUBLIC']: if sym == '_GLOBAL_OFFSET_TABLE_': continue if not sym in symmap: symmap[sym] = set() symmap[sym].add(libpath) for sym in symmap: if len(symmap[sym]) > 1: for libpath in symmap[sym]: if libpath in bld.env.library_dict: report_duplicate(bld, binname, sym, symmap[sym], fail_on_error) break def symbols_dupcheck(task, fail_on_error=False): '''check for symbols defined in two different subsystems''' bld = task.env.bld tgt_list = get_tgt_list(bld) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') build_library_dict(bld, tgt_list) for t in tgt_list: if t.samba_type == 'BINARY': binname = os.path.relpath(t.link_task.outputs[0].abspath(bld.env), os.getcwd()) symbols_dupcheck_binary(bld, binname, fail_on_error) def symbols_dupcheck_fatal(task): '''check for symbols defined in two different subsystems (and fail if duplicates are found)''' symbols_dupcheck(task, fail_on_error=True) def SYMBOL_CHECK(bld): '''check our dependency lists''' if Options.options.SYMBOLCHECK: bld.SET_BUILD_GROUP('symbolcheck') task = bld(rule=symbols_symbolcheck, always=True, name='symbol checking') task.env.bld = bld bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_syslibcheck, always=True, name='syslib checking') task.env.bld = bld bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_dupcheck, always=True, name='symbol duplicate checking') task.env.bld = bld if Options.options.WHYNEEDED: bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_whyneeded, always=True, name='check why a dependency is needed') task.env.bld = bld Build.BuildContext.SYMBOL_CHECK = SYMBOL_CHECK def DUP_SYMBOL_CHECK(bld): if Options.options.DUP_SYMBOLCHECK and bld.env.DEVELOPER: '''check for duplicate symbols''' bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_dupcheck_fatal, always=True, name='symbol duplicate checking') task.env.bld = bld Build.BuildContext.DUP_SYMBOL_CHECK = DUP_SYMBOL_CHECK ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 
mtime=1579615787.857205
tevent-0.11.0/buildtools/wafsamba/test_duplicate_symbol.sh0000770000000000000000000000044400000000000023732 0ustar00rootroot00000000000000
#!/bin/sh
# Run the waf duplicate symbol check, wrapped in subunit.

. testprogs/blackbox/subunit.sh

subunit_start_test duplicate_symbols

if $PYTHON ./buildtools/bin/waf build --dup-symbol-check; then
    subunit_pass_test duplicate_symbols
else
    echo | subunit_fail_test duplicate_symbols
fi
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027
mtime=1579615787.857205
tevent-0.11.0/buildtools/wafsamba/tests/__init__.py0000660000000000000000000000224000000000000022245 0ustar00rootroot00000000000000
# Copyright (C) 2012 Jelmer Vernooij
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Tests for wafsamba."""

from unittest import (
    TestCase,
    TestLoader,
    )


def test_suite():
    names = [
        'abi',
        'bundled',
        'utils',
        ]
    module_names = ['wafsamba.tests.test_' + name for name in names]
    loader = TestLoader()
    result = loader.suiteClass()
    suite = loader.loadTestsFromNames(module_names)
    result.addTests(suite)
    return result
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028
mtime=1611563707.4137106
tevent-0.11.0/buildtools/wafsamba/tests/test_abi.py0000660000000000000000000001037400000000000022307 0ustar00rootroot00000000000000
# Copyright (C) 2012 Jelmer Vernooij
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_abi import ( abi_write_vscript, normalise_signature, ) from io import StringIO class NormaliseSignatureTests(TestCase): def test_function_simple(self): self.assertEqual("int (const struct GUID *, const struct GUID *)", normalise_signature("$2 = {int (const struct GUID *, const struct GUID *)} 0xe871 ")) def test_maps_Bool(self): # Some types have different internal names self.assertEqual("bool (const struct GUID *)", normalise_signature("$1 = {_Bool (const struct GUID *)} 0xe75b ")) def test_function_keep(self): self.assertEqual( "enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)", normalise_signature("enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)")) def test_struct_constant(self): self.assertEqual( 'uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0', normalise_signature('$239 = {uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0}')) def test_incomplete_sequence(self): # Newer versions of gdb insert these incomplete sequence elements self.assertEqual( 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237", , node = "\\b\\000+\\020H`"}, if_version = 2}')) self.assertEqual( 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2}')) class WriteVscriptTests(TestCase): def test_one(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "old": "1.0", "new": "1.0"}, ["*"]) self.assertEqual(f.getvalue(), """\ 1.0 { \tglobal: \t\t*; \tlocal: \t\t_end; \t\t__bss_start; \t\t_edata; }; """) def test_simple(self): # No restrictions. 
f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", ["0.1"], { "old": "0.1", "new": "1.0"}, ["*"]) self.assertEqual(f.getvalue(), """\ MYLIB_0.1 { \tglobal: \t\told; }; 1.0 { \tglobal: \t\t*; \tlocal: \t\t_end; \t\t__bss_start; \t\t_edata; }; """) def test_exclude(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "exc_old": "0.1", "old": "0.1", "new": "1.0"}, ["!exc_*"]) self.assertEqual(f.getvalue(), """\ 1.0 { \tglobal: \t\t*; \tlocal: \t\texc_*; \t\t_end; \t\t__bss_start; \t\t_edata; }; """) def test_excludes_and_includes(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "pub_foo": "1.0", "exc_bar": "1.0", "other": "1.0" }, ["pub_*", "!exc_*"]) self.assertEqual(f.getvalue(), """\ 1.0 { \tglobal: \t\tpub_*; \tlocal: \t\texc_*; \t\t_end; \t\t__bss_start; \t\t_edata; \t\t*; }; """) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/tests/test_bundled.py0000660000000000000000000000176300000000000023173 0ustar00rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_bundled import ( tuplize_version, ) class TuplizeVersionTests(TestCase): def test_simple(self): self.assertEqual((1, 2, 10), tuplize_version("1.2.10")) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1594296290.3981044 tevent-0.11.0/buildtools/wafsamba/tests/test_utils.py0000660000000000000000000000471600000000000022717 0ustar00rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_utils import ( TO_LIST, dict_concat, subst_vars_error, unique_list, ) class ToListTests(TestCase): def test_none(self): self.assertEqual([], TO_LIST(None)) def test_already_list(self): self.assertEqual(["foo", "bar", 1], TO_LIST(["foo", "bar", 1])) def test_default_delimiter(self): self.assertEqual(["foo", "bar"], TO_LIST("foo bar")) self.assertEqual(["foo", "bar"], TO_LIST(" foo bar ")) self.assertEqual(["foo ", "bar"], TO_LIST(" \"foo \" bar ")) def test_delimiter(self): self.assertEqual(["foo", "bar"], TO_LIST("foo,bar", ",")) self.assertEqual([" foo", "bar "], TO_LIST(" foo,bar ", ",")) self.assertEqual([" \" foo\"", " bar "], TO_LIST(" \" foo\", bar ", ",")) class UniqueListTests(TestCase): def test_unique_list(self): self.assertEqual(["foo", "bar"], unique_list(["foo", "bar", "foo"])) class SubstVarsErrorTests(TestCase): def test_valid(self): self.assertEqual("", subst_vars_error("", {})) self.assertEqual("FOO bar", subst_vars_error("${F} bar", {"F": "FOO"})) def test_invalid(self): self.assertRaises(KeyError, subst_vars_error, "${F}", {}) class DictConcatTests(TestCase): def test_empty(self): ret = {} dict_concat(ret, {}) self.assertEqual({}, ret) def test_same(self): ret = {"foo": "bar"} dict_concat(ret, {"foo": "bla"}) self.assertEqual({"foo": "bar"}, ret) def test_simple(self): ret = {"foo": "bar"} dict_concat(ret, {"blie": "bla"}) self.assertEqual({"foo": "bar", "blie": "bla"}, ret) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/wafsamba.py0000660000000000000000000010476000000000000021137 0ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import os, sys, re, shutil, fnmatch from waflib import Build, Options, Task, Utils, TaskGen, Logs, Context, Errors from waflib.Configure import conf from waflib.Logs import debug from samba_utils import SUBST_VARS_RECURSIVE TaskGen.task_gen.apply_verif = Utils.nada # bring in the other samba modules from samba_utils import * from samba_utils import symlink from samba_version import * from samba_autoconf import * from samba_patterns import * from samba_pidl import * from samba_autoproto import * from samba_python import * from samba_perl import * from samba_deps import * from samba_bundled import * from samba_third_party import * import samba_cross import samba_install import samba_conftests import samba_abi import samba_headers import generic_cc import samba_dist import samba_wildcard import symbols import pkgconfig import configure_file import samba_waf18 LIB_PATH="shared" os.environ['PYTHONUNBUFFERED'] = '1' if Context.HEXVERSION not in (0x2001500,): Logs.error(''' Please use the version of waf that comes with Samba, not a system installed version. See http://wiki.samba.org/index.php/Waf for details. Alternatively, please run ./configure and make as usual. 
That will call the right version of waf.''') sys.exit(1) @conf def SAMBA_BUILD_ENV(conf): '''create the samba build environment''' conf.env.BUILD_DIRECTORY = conf.bldnode.abspath() mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, LIB_PATH)) mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, LIB_PATH, "private")) mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, "modules")) mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, 'python/samba/dcerpc')) # this allows all of the bin/shared and bin/python targets # to be expressed in terms of build directory paths mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, 'default')) for (source, target) in [('shared', 'shared'), ('modules', 'modules'), ('python', 'python')]: link_target = os.path.join(conf.env.BUILD_DIRECTORY, 'default/' + target) if not os.path.lexists(link_target): symlink('../' + source, link_target) # get perl to put the blib files in the build directory blib_bld = os.path.join(conf.env.BUILD_DIRECTORY, 'default/pidl/blib') blib_src = os.path.join(conf.srcnode.abspath(), 'pidl/blib') mkdir_p(blib_bld + '/man1') mkdir_p(blib_bld + '/man3') if os.path.islink(blib_src): os.unlink(blib_src) elif os.path.exists(blib_src): shutil.rmtree(blib_src) def ADD_INIT_FUNCTION(bld, subsystem, target, init_function): '''add an init_function to the list for a subsystem''' if init_function is None: return bld.ASSERT(subsystem is not None, "You must specify a subsystem for init_function '%s'" % init_function) cache = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') if not subsystem in cache: cache[subsystem] = [] cache[subsystem].append( { 'TARGET':target, 'INIT_FUNCTION':init_function } ) Build.BuildContext.ADD_INIT_FUNCTION = ADD_INIT_FUNCTION def generate_empty_file(task): task.outputs[0].write('') return 0 ################################################################# def SAMBA_LIBRARY(bld, libname, source, deps='', public_deps='', includes='', public_headers=None, public_headers_install=True, private_headers=None, header_path=None, pc_files=None, vnum=None, soname=None, cflags='', cflags_end=None, ldflags='', external_library=False, realname=None, keep_underscore=False, autoproto=None, autoproto_extra_source='', group='main', depends_on='', local_include=True, global_include=True, vars=None, subdir=None, install_path=None, install=True, pyembed=False, pyext=False, target_type='LIBRARY', bundled_extension=False, bundled_name=None, link_name=None, abi_directory=None, abi_match=None, hide_symbols=False, manpages=None, private_library=False, grouping_library=False, allow_undefined_symbols=False, allow_warnings=False, enabled=True): '''define a Samba library''' if private_library and public_headers: raise Errors.WafError("private library '%s' must not have public header files" % libname) if LIB_MUST_BE_PRIVATE(bld, libname): private_library = True if not enabled: SET_TARGET_TYPE(bld, libname, 'DISABLED') return source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) # remember empty libraries, so we can strip the dependencies if ((source == '') or (source == [])): if deps == '' and public_deps == '': SET_TARGET_TYPE(bld, libname, 'EMPTY') return empty_c = libname + '.empty.c' bld.SAMBA_GENERATOR('%s_empty_c' % libname, rule=generate_empty_file, target=empty_c) source=empty_c if BUILTIN_LIBRARY(bld, libname): obj_target = libname else: obj_target = libname + '.objlist' if group == 'libraries': subsystem_group = 'main' else: subsystem_group = group # first create a target for building the object files for this library # by separating in this 
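# Illustrative sketch (not from the Samba tree): a hypothetical
# wscript_build fragment using the SAMBA_LIBRARY() rule defined above.
# As the checks further down enforce, a public (non-private) library must
# carry a vnum, a pkg-config file and public headers; a private library
# needs none of that.  All target and file names below are invented.
def build(bld):
    bld.SAMBA_LIBRARY('examplelib',
                      source='lib/example.c',
                      deps='talloc',
                      public_headers='lib/example.h',
                      pc_files='example.pc',
                      vnum='0.0.1')

    # private helper library: no public metadata required
    bld.SAMBA_LIBRARY('examplelib-internal',
                      source='lib/internal.c',
                      deps='examplelib',
                      private_library=True)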
way, we avoid recompiling the C files # separately for the install library and the build library bld.SAMBA_SUBSYSTEM(obj_target, source = source, deps = deps, public_deps = public_deps, includes = includes, public_headers = public_headers, public_headers_install = public_headers_install, private_headers= private_headers, header_path = header_path, cflags = cflags, cflags_end = cflags_end, group = subsystem_group, autoproto = autoproto, autoproto_extra_source=autoproto_extra_source, depends_on = depends_on, hide_symbols = hide_symbols, allow_warnings = allow_warnings, pyembed = pyembed, pyext = pyext, local_include = local_include, global_include = global_include) if BUILTIN_LIBRARY(bld, libname): return if not SET_TARGET_TYPE(bld, libname, target_type): return # the library itself will depend on that object target deps += ' ' + public_deps deps = TO_LIST(deps) deps.append(obj_target) realname = bld.map_shlib_extension(realname, python=(target_type=='PYTHON')) link_name = bld.map_shlib_extension(link_name, python=(target_type=='PYTHON')) # we don't want any public libraries without version numbers if (not private_library and target_type != 'PYTHON' and not realname): if vnum is None and soname is None: raise Errors.WafError("public library '%s' must have a vnum" % libname) if pc_files is None: raise Errors.WafError("public library '%s' must have pkg-config file" % libname) if public_headers is None: raise Errors.WafError("public library '%s' must have header files" % libname) if bundled_name is not None: pass elif target_type == 'PYTHON' or realname or not private_library: if keep_underscore: bundled_name = libname else: bundled_name = libname.replace('_', '-') else: assert (private_library == True and realname is None) if abi_directory or vnum or soname: bundled_extension=True bundled_name = PRIVATE_NAME(bld, libname.replace('_', '-'), bundled_extension, private_library) ldflags = TO_LIST(ldflags) if bld.env['ENABLE_RELRO'] is True: ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) features = 'c cshlib symlink_lib install_lib' if pyext: features += ' pyext' if pyembed: features += ' pyembed' if abi_directory: features += ' abi_check' if pyembed and bld.env['PYTHON_SO_ABI_FLAG']: # For ABI checking, we don't care about the Python version. # Remove the Python ABI tag (e.g. 
".cpython-35m") abi_flag = bld.env['PYTHON_SO_ABI_FLAG'] replacement = '' version_libname = libname.replace(abi_flag, replacement) else: version_libname = libname vscript = None if bld.env.HAVE_LD_VERSION_SCRIPT: if private_library: version = "%s_%s" % (Context.g_module.APPNAME, Context.g_module.VERSION) elif vnum: version = "%s_%s" % (libname, vnum) else: version = None if version: vscript = "%s.vscript" % libname bld.ABI_VSCRIPT(version_libname, abi_directory, version, vscript, abi_match) fullname = apply_pattern(bundled_name, bld.env.cshlib_PATTERN) fullpath = bld.path.find_or_declare(fullname) vscriptpath = bld.path.find_or_declare(vscript) if not fullpath: raise Errors.WafError("unable to find fullpath for %s" % fullname) if not vscriptpath: raise Errors.WafError("unable to find vscript path for %s" % vscript) bld.add_manual_dependency(fullpath, vscriptpath) if bld.is_install: # also make the .inst file depend on the vscript instname = apply_pattern(bundled_name + '.inst', bld.env.cshlib_PATTERN) bld.add_manual_dependency(bld.path.find_or_declare(instname), bld.path.find_or_declare(vscript)) vscript = os.path.join(bld.path.abspath(bld.env), vscript) bld.SET_BUILD_GROUP(group) t = bld( features = features, source = [], target = bundled_name, depends_on = depends_on, samba_ldflags = ldflags, samba_deps = deps, samba_includes = includes, version_script = vscript, version_libname = version_libname, local_include = local_include, global_include = global_include, vnum = vnum, soname = soname, install_path = None, samba_inst_path = install_path, name = libname, samba_realname = realname, samba_install = install, abi_directory = "%s/%s" % (bld.path.abspath(), abi_directory), abi_match = abi_match, private_library = private_library, grouping_library=grouping_library, allow_undefined_symbols=allow_undefined_symbols ) if realname and not link_name: link_name = 'shared/%s' % realname if link_name: if 'waflib.extras.compat15' in sys.modules: link_name = 'default/' + link_name t.link_name = link_name if pc_files is not None and not private_library: if pyembed: bld.PKG_CONFIG_FILES(pc_files, vnum=vnum, extra_name=bld.env['PYTHON_SO_ABI_FLAG']) else: bld.PKG_CONFIG_FILES(pc_files, vnum=vnum) if (manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']): bld.MANPAGES(manpages, install) Build.BuildContext.SAMBA_LIBRARY = SAMBA_LIBRARY ################################################################# def SAMBA_BINARY(bld, binname, source, deps='', includes='', public_headers=None, private_headers=None, header_path=None, modules=None, ldflags=None, cflags='', cflags_end=None, autoproto=None, use_hostcc=False, use_global_deps=True, compiler=None, group='main', manpages=None, local_include=True, global_include=True, subsystem_name=None, allow_warnings=False, pyembed=False, vars=None, subdir=None, install=True, install_path=None, enabled=True, fuzzer=False, for_selftest=False): '''define a Samba binary''' if for_selftest: install=False if not bld.CONFIG_GET('ENABLE_SELFTEST'): enabled=False if not enabled: SET_TARGET_TYPE(bld, binname, 'DISABLED') return # Fuzzing builds do not build normal binaries # however we must build asn1compile etc if not use_hostcc and bld.env.enable_fuzzing != fuzzer: SET_TARGET_TYPE(bld, binname, 'DISABLED') return if fuzzer: install = False if ldflags is None: ldflags = bld.env['FUZZ_TARGET_LDFLAGS'] if not SET_TARGET_TYPE(bld, binname, 'BINARY'): return features = 'c cprogram symlink_bin install_bin' if pyembed: features += ' pyembed' obj_target = 
binname + '.objlist' source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) source = unique_list(TO_LIST(source)) if group == 'binaries': subsystem_group = 'main' elif group == 'build_compilers': subsystem_group = 'compiler_libraries' else: subsystem_group = group # only specify PIE flags for binaries pie_cflags = TO_LIST(cflags) pie_ldflags = TO_LIST(ldflags) if bld.env['ENABLE_PIE'] is True: pie_cflags.extend(TO_LIST('-fPIE')) pie_ldflags.extend(TO_LIST('-pie')) if bld.env['ENABLE_RELRO'] is True: pie_ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) # first create a target for building the object files for this binary # by separating in this way, we avoid recompiling the C files # separately for the install binary and the build binary bld.SAMBA_SUBSYSTEM(obj_target, source = source, deps = deps, includes = includes, cflags = pie_cflags, cflags_end = cflags_end, group = subsystem_group, autoproto = autoproto, subsystem_name = subsystem_name, local_include = local_include, global_include = global_include, use_hostcc = use_hostcc, pyext = pyembed, allow_warnings = allow_warnings, use_global_deps= use_global_deps) bld.SET_BUILD_GROUP(group) # the binary itself will depend on that object target deps = TO_LIST(deps) deps.append(obj_target) t = bld( features = features, source = [], target = binname, samba_deps = deps, samba_includes = includes, local_include = local_include, global_include = global_include, samba_modules = modules, top = True, samba_subsystem= subsystem_name, install_path = None, samba_inst_path= install_path, samba_install = install, samba_ldflags = pie_ldflags ) if manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']: bld.MANPAGES(manpages, install) Build.BuildContext.SAMBA_BINARY = SAMBA_BINARY ################################################################# def SAMBA_MODULE(bld, modname, source, deps='', includes='', subsystem=None, init_function=None, module_init_name='samba_init_module', autoproto=None, autoproto_extra_source='', cflags='', cflags_end=None, internal_module=True, local_include=True, global_include=True, vars=None, subdir=None, enabled=True, pyembed=False, manpages=None, allow_undefined_symbols=False, allow_warnings=False, install=True ): '''define a Samba module.''' bld.ASSERT(subsystem, "You must specify a subsystem for SAMBA_MODULE(%s)" % modname) source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) if internal_module or BUILTIN_LIBRARY(bld, modname): # Do not create modules for disabled subsystems if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': return bld.SAMBA_SUBSYSTEM(modname, source, deps=deps, includes=includes, autoproto=autoproto, autoproto_extra_source=autoproto_extra_source, cflags=cflags, cflags_end=cflags_end, local_include=local_include, global_include=global_include, allow_warnings=allow_warnings, enabled=enabled) bld.ADD_INIT_FUNCTION(subsystem, modname, init_function) return if not enabled: SET_TARGET_TYPE(bld, modname, 'DISABLED') return # Do not create modules for disabled subsystems if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': return realname = modname deps += ' ' + subsystem while realname.startswith("lib"+subsystem+"_"): realname = realname[len("lib"+subsystem+"_"):] while realname.startswith(subsystem+"_"): realname = realname[len(subsystem+"_"):] build_name = "%s_module_%s" % (subsystem, realname) realname = bld.make_libname(realname) while realname.startswith("lib"): realname = realname[len("lib"):] 
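# Illustrative sketch (not from the Samba tree): hypothetical
# wscript_build fragments for the SAMBA_BINARY() rule above and the
# SAMBA_MODULE() rule being defined here.  All names are invented.
def build(bld):
    # a selftest-only helper binary: installation is switched off and the
    # target is disabled entirely when selftest is not configured
    bld.SAMBA_BINARY('example_test_tool',
                     source='tests/test_tool.c',
                     deps='examplelib',
                     for_selftest=True)

    # a loadable module attached to an existing subsystem; init_function
    # is mapped onto the generic module_init_name at compile time
    bld.SAMBA_MODULE('example_backend',
                     source='modules/backend.c',
                     subsystem='examplesubsys',
                     init_function='example_backend_init',
                     internal_module=False)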
build_link_name = "modules/%s/%s" % (subsystem, realname) if init_function: cflags += " -D%s=%s" % (init_function, module_init_name) bld.SAMBA_LIBRARY(modname, source, deps=deps, includes=includes, cflags=cflags, cflags_end=cflags_end, realname = realname, autoproto = autoproto, local_include=local_include, global_include=global_include, vars=vars, bundled_name=build_name, link_name=build_link_name, install_path="${MODULESDIR}/%s" % subsystem, pyembed=pyembed, manpages=manpages, allow_undefined_symbols=allow_undefined_symbols, allow_warnings=allow_warnings, install=install ) Build.BuildContext.SAMBA_MODULE = SAMBA_MODULE ################################################################# def SAMBA_SUBSYSTEM(bld, modname, source, deps='', public_deps='', includes='', public_headers=None, public_headers_install=True, private_headers=None, header_path=None, cflags='', cflags_end=None, group='main', init_function_sentinel=None, autoproto=None, autoproto_extra_source='', depends_on='', local_include=True, local_include_first=True, global_include=True, subsystem_name=None, enabled=True, use_hostcc=False, use_global_deps=True, vars=None, subdir=None, hide_symbols=False, allow_warnings=False, pyext=False, pyembed=False): '''define a Samba subsystem''' if not enabled: SET_TARGET_TYPE(bld, modname, 'DISABLED') return # remember empty subsystems, so we can strip the dependencies if ((source == '') or (source == [])): if deps == '' and public_deps == '': SET_TARGET_TYPE(bld, modname, 'EMPTY') return empty_c = modname + '.empty.c' bld.SAMBA_GENERATOR('%s_empty_c' % modname, rule=generate_empty_file, target=empty_c) source=empty_c if not SET_TARGET_TYPE(bld, modname, 'SUBSYSTEM'): return source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) source = unique_list(TO_LIST(source)) deps += ' ' + public_deps bld.SET_BUILD_GROUP(group) features = 'c' if pyext: features += ' pyext' if pyembed: features += ' pyembed' t = bld( features = features, source = source, target = modname, samba_cflags = CURRENT_CFLAGS(bld, modname, cflags, allow_warnings=allow_warnings, use_hostcc=use_hostcc, hide_symbols=hide_symbols), depends_on = depends_on, samba_deps = TO_LIST(deps), samba_includes = includes, local_include = local_include, local_include_first = local_include_first, global_include = global_include, samba_subsystem= subsystem_name, samba_use_hostcc = use_hostcc, samba_use_global_deps = use_global_deps, ) if cflags_end is not None: t.samba_cflags.extend(TO_LIST(cflags_end)) if autoproto is not None: bld.SAMBA_AUTOPROTO(autoproto, source + TO_LIST(autoproto_extra_source)) if public_headers is not None: bld.PUBLIC_HEADERS(public_headers, header_path=header_path, public_headers_install=public_headers_install) return t Build.BuildContext.SAMBA_SUBSYSTEM = SAMBA_SUBSYSTEM def SAMBA_GENERATOR(bld, name, rule, source='', target='', group='generators', enabled=True, public_headers=None, public_headers_install=True, private_headers=None, header_path=None, vars=None, dep_vars=[], always=False): '''A generic source generator target''' if not SET_TARGET_TYPE(bld, name, 'GENERATOR'): return if not enabled: return dep_vars.append('ruledeps') dep_vars.append('SAMBA_GENERATOR_VARS') bld.SET_BUILD_GROUP(group) t = bld( rule=rule, source=bld.EXPAND_VARIABLES(source, vars=vars), target=target, shell=isinstance(rule, str), update_outputs=True, before='c', ext_out='.c', samba_type='GENERATOR', dep_vars = dep_vars, name=name) if vars is None: vars = {} t.env.SAMBA_GENERATOR_VARS = vars if always: 
t.always = True if public_headers is not None: bld.PUBLIC_HEADERS(public_headers, header_path=header_path, public_headers_install=public_headers_install) return t Build.BuildContext.SAMBA_GENERATOR = SAMBA_GENERATOR @Utils.run_once def SETUP_BUILD_GROUPS(bld): '''setup build groups used to ensure that the different build phases happen consecutively''' bld.p_ln = bld.srcnode # we do want to see all targets! bld.env['USING_BUILD_GROUPS'] = True bld.add_group('setup') bld.add_group('generators') bld.add_group('hostcc_base_build_source') bld.add_group('hostcc_base_build_main') bld.add_group('hostcc_build_source') bld.add_group('hostcc_build_main') bld.add_group('vscripts') bld.add_group('base_libraries') bld.add_group('build_source') bld.add_group('prototypes') bld.add_group('headers') bld.add_group('main') bld.add_group('symbolcheck') bld.add_group('syslibcheck') bld.add_group('final') Build.BuildContext.SETUP_BUILD_GROUPS = SETUP_BUILD_GROUPS def SET_BUILD_GROUP(bld, group): '''set the current build group''' if not 'USING_BUILD_GROUPS' in bld.env: return bld.set_group(group) Build.BuildContext.SET_BUILD_GROUP = SET_BUILD_GROUP def SAMBA_SCRIPT(bld, name, pattern, installdir, installname=None): '''used to copy scripts from the source tree into the build directory for use by selftest''' source = bld.path.ant_glob(pattern, flat=True) bld.SET_BUILD_GROUP('build_source') for s in TO_LIST(source): iname = s if installname is not None: iname = installname target = os.path.join(installdir, iname) tgtdir = os.path.dirname(os.path.join(bld.srcnode.abspath(bld.env), '..', target)) mkdir_p(tgtdir) link_src = os.path.normpath(os.path.join(bld.path.abspath(), s)) link_dst = os.path.join(tgtdir, os.path.basename(iname)) if os.path.islink(link_dst) and os.readlink(link_dst) == link_src: continue if os.path.islink(link_dst): os.unlink(link_dst) Logs.info("symlink: %s -> %s/%s" % (s, installdir, iname)) symlink(link_src, link_dst) Build.BuildContext.SAMBA_SCRIPT = SAMBA_SCRIPT def copy_and_fix_python_path(task): pattern='sys.path.insert(0, "bin/python")' if task.env["PYTHONARCHDIR"] in sys.path and task.env["PYTHONDIR"] in sys.path: replacement = "" elif task.env["PYTHONARCHDIR"] == task.env["PYTHONDIR"]: replacement="""sys.path.insert(0, "%s")""" % task.env["PYTHONDIR"] else: replacement="""sys.path.insert(0, "%s") sys.path.insert(1, "%s")""" % (task.env["PYTHONARCHDIR"], task.env["PYTHONDIR"]) if task.env["PYTHON"][0].startswith("/"): replacement_shebang = "#!%s\n" % task.env["PYTHON"][0] else: replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PYTHON"][0] installed_location=task.outputs[0].bldpath(task.env) source_file = open(task.inputs[0].srcpath(task.env)) installed_file = open(installed_location, 'w') lineno = 0 for line in source_file: newline = line if (lineno == 0 and line[:2] == "#!"): newline = replacement_shebang elif pattern in line: newline = line.replace(pattern, replacement) installed_file.write(newline) lineno = lineno + 1 installed_file.close() os.chmod(installed_location, 0o755) return 0 def copy_and_fix_perl_path(task): pattern='use lib "$RealBin/lib";' replacement = "" if not task.env["PERL_LIB_INSTALL_DIR"] in task.env["PERL_INC"]: replacement = 'use lib "%s";' % task.env["PERL_LIB_INSTALL_DIR"] if task.env["PERL"][0] == "/": replacement_shebang = "#!%s\n" % task.env["PERL"] else: replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PERL"] installed_location=task.outputs[0].bldpath(task.env) source_file = open(task.inputs[0].srcpath(task.env)) installed_file = 
open(installed_location, 'w') lineno = 0 for line in source_file: newline = line if lineno == 0 and task.env["PERL_SPECIFIED"] == True and line[:2] == "#!": newline = replacement_shebang elif pattern in line: newline = line.replace(pattern, replacement) installed_file.write(newline) lineno = lineno + 1 installed_file.close() os.chmod(installed_location, 0o755) return 0 def install_file(bld, destdir, file, chmod=MODE_644, flat=False, python_fixup=False, perl_fixup=False, destname=None, base_name=None): '''install a file''' if not isinstance(file, str): file = file.abspath() destdir = bld.EXPAND_VARIABLES(destdir) if not destname: destname = file if flat: destname = os.path.basename(destname) dest = os.path.join(destdir, destname) if python_fixup: # fix the path python will use to find Samba modules inst_file = file + '.inst' bld.SAMBA_GENERATOR('python_%s' % destname, rule=copy_and_fix_python_path, dep_vars=["PYTHON","PYTHON_SPECIFIED","PYTHONDIR","PYTHONARCHDIR"], source=file, target=inst_file) file = inst_file if perl_fixup: # fix the path perl will use to find Samba modules inst_file = file + '.inst' bld.SAMBA_GENERATOR('perl_%s' % destname, rule=copy_and_fix_perl_path, dep_vars=["PERL","PERL_SPECIFIED","PERL_LIB_INSTALL_DIR"], source=file, target=inst_file) file = inst_file if base_name: file = os.path.join(base_name, file) bld.install_as(dest, file, chmod=chmod) def INSTALL_FILES(bld, destdir, files, chmod=MODE_644, flat=False, python_fixup=False, perl_fixup=False, destname=None, base_name=None): '''install a set of files''' for f in TO_LIST(files): install_file(bld, destdir, f, chmod=chmod, flat=flat, python_fixup=python_fixup, perl_fixup=perl_fixup, destname=destname, base_name=base_name) Build.BuildContext.INSTALL_FILES = INSTALL_FILES def INSTALL_WILDCARD(bld, destdir, pattern, chmod=MODE_644, flat=False, python_fixup=False, exclude=None, trim_path=None): '''install a set of files matching a wildcard pattern''' files=TO_LIST(bld.path.ant_glob(pattern, flat=True)) if trim_path: files2 = [] for f in files: files2.append(os.path.relpath(f, trim_path)) files = files2 if exclude: for f in files[:]: if fnmatch.fnmatch(f, exclude): files.remove(f) INSTALL_FILES(bld, destdir, files, chmod=chmod, flat=flat, python_fixup=python_fixup, base_name=trim_path) Build.BuildContext.INSTALL_WILDCARD = INSTALL_WILDCARD def INSTALL_DIR(bld, path, chmod=0o755, env=None): """Install a directory if it doesn't exist, always set permissions.""" if not path: return [] destpath = bld.EXPAND_VARIABLES(path) if Options.options.destdir: destpath = os.path.join(Options.options.destdir, destpath.lstrip(os.sep)) if bld.is_install > 0: if not os.path.isdir(destpath): try: Logs.info('* create %s', destpath) os.makedirs(destpath) os.chmod(destpath, chmod) except OSError as e: if not os.path.isdir(destpath): raise Errors.WafError("Cannot create the folder '%s' (error: %s)" % (path, e)) Build.BuildContext.INSTALL_DIR = INSTALL_DIR def INSTALL_DIRS(bld, destdir, dirs, chmod=0o755, env=None): '''install a set of directories''' destdir = bld.EXPAND_VARIABLES(destdir) dirs = bld.EXPAND_VARIABLES(dirs) for d in TO_LIST(dirs): INSTALL_DIR(bld, os.path.join(destdir, d), chmod, env) Build.BuildContext.INSTALL_DIRS = INSTALL_DIRS def MANPAGES(bld, manpages, install): '''build and install manual pages''' bld.env.MAN_XSL = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' for m in manpages.split(): source = m + '.xml' bld.SAMBA_GENERATOR(m, source=source, target=m, group='final', rule='${XSLTPROC} 
--xinclude -o ${TGT} --nonet ${MAN_XSL} ${SRC}' ) if install: bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) Build.BuildContext.MANPAGES = MANPAGES def SAMBAMANPAGES(bld, manpages, extra_source=None): '''build and install manual pages''' bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl' bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl' bld.env.SAMBA_CATALOG = bld.bldnode.abspath() + '/docs-xml/build/catalog.xml' bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.env.SAMBA_CATALOG for m in manpages.split(): source = m + '.xml' if extra_source is not None: source = [source, extra_source] bld.SAMBA_GENERATOR(m, source=source, target=m, group='final', dep_vars=['SAMBA_MAN_XSL', 'SAMBA_EXPAND_XSL', 'SAMBA_CATALOG'], rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}" export XML_CATALOG_FILES ${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC[0].abspath(env)} ${XSLTPROC} --nonet -o ${TGT} ${SAMBA_MAN_XSL} ${TGT}.xml''' ) bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) Build.BuildContext.SAMBAMANPAGES = SAMBAMANPAGES @after('apply_link') @feature('cshlib') def apply_bundle_remove_dynamiclib_patch(self): if self.env['MACBUNDLE'] or getattr(self,'mac_bundle',False): if not getattr(self,'vnum',None): try: self.env['LINKFLAGS'].remove('-dynamiclib') self.env['LINKFLAGS'].remove('-single_module') except ValueError: pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.1156025 tevent-0.11.0/buildtools/wafsamba/wscript0000660000000000000000000007121500000000000020420 0ustar00rootroot00000000000000#!/usr/bin/env python # this is a base set of waf rules that everything else pulls in first import os, sys from waflib import Configure, Logs, Options, Utils, Context, Errors import wafsamba from samba_utils import symlink from optparse import SUPPRESS_HELP # this forces configure to be re-run if any of the configure # sections of the build scripts change. We have to check # for this in sys.argv as options have not yet been parsed when # we need to set this. This is off by default until some issues # are resolved related to WAFCACHE. It will need a lot of testing # before it is enabled by default. if '--enable-auto-reconfigure' in sys.argv: Configure.autoconfig = 'clobber' def default_value(option, default=''): if option in Options.options.__dict__: return Options.options.__dict__[option] return default def options(opt): opt.load('compiler_cc') opt.load('gnu_dirs') gr = opt.option_group('library handling options') gr.add_option('--bundled-libraries', help=("comma separated list of bundled libraries. May include !LIBNAME to disable bundling a library. Can be 'NONE' or 'ALL' [auto]"), action="store", dest='BUNDLED_LIBS', default='') gr.add_option('--private-libraries', help=("comma separated list of normally public libraries to build instead as private libraries. May include !LIBNAME to disable making a library private. 
Can be 'NONE' or 'ALL' [auto]"), action="store", dest='PRIVATE_LIBS', default='') extension_default = default_value('PRIVATE_EXTENSION_DEFAULT') gr.add_option('--private-library-extension', help=("name extension for private libraries [%s]" % extension_default), action="store", dest='PRIVATE_EXTENSION', default=extension_default) extension_exception = default_value('PRIVATE_EXTENSION_EXCEPTION') gr.add_option('--private-extension-exception', help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception), action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception) builtin_default = default_value('BUILTIN_LIBRARIES_DEFAULT') gr.add_option('--builtin-libraries', help=("command separated list of libraries to build directly into binaries [%s]" % builtin_default), action="store", dest='BUILTIN_LIBRARIES', default=builtin_default) gr.add_option('--minimum-library-version', help=("list of minimum system library versions (LIBNAME1:version,LIBNAME2:version)"), action="store", dest='MINIMUM_LIBRARY_VERSION', default='') gr.add_option('--disable-rpath', help=("Disable use of rpath for build binaries"), action="store_true", dest='disable_rpath_build', default=False) gr.add_option('--disable-rpath-install', help=("Disable use of rpath for library path in installed files"), action="store_true", dest='disable_rpath_install', default=False) gr.add_option('--disable-rpath-private-install', help=("Disable use of rpath for private library path in installed files"), action="store_true", dest='disable_rpath_private_install', default=False) gr.add_option('--nonshared-binary', help=("Disable use of shared libs for the listed binaries"), action="store", dest='NONSHARED_BINARIES', default='') gr.add_option('--disable-symbol-versions', help=("Disable use of the --version-script linker option"), action="store_true", dest='disable_symbol_versions', default=False) opt.add_option('--with-modulesdir', help=("modules directory [PREFIX/modules]"), action="store", dest='MODULESDIR', default='${PREFIX}/modules') opt.add_option('--with-privatelibdir', help=("private library directory [PREFIX/lib/%s]" % Context.g_module.APPNAME), action="store", dest='PRIVATELIBDIR', default=None) opt.add_option('--with-libiconv', help='additional directory to search for libiconv', action='store', dest='iconv_open', default='/usr/local', match = ['Checking for library iconv', 'Checking for iconv_open', 'Checking for header iconv.h']) opt.add_option('--without-gettext', help=("Disable use of gettext"), action="store_true", dest='disable_gettext', default=False) gr = opt.option_group('developer options') gr.add_option('-C', help='enable configure caching', action='store_true', dest='enable_configure_cache') gr.add_option('--enable-auto-reconfigure', help='enable automatic reconfigure on build', action='store_true', dest='enable_auto_reconfigure') gr.add_option('--enable-debug', help=("Turn on debugging symbols"), action="store_true", dest='debug', default=False) gr.add_option('--enable-developer', help=("Turn on developer warnings and debugging"), action="store_true", dest='developer', default=False) gr.add_option('--pidl-developer', help=("annotate PIDL-generated code for developers"), action="store_true", dest='pidl_developer', default=False) gr.add_option('--disable-warnings-as-errors', help=("Do not treat all warnings as errors (disable -Werror)"), action="store_true", dest='disable_warnings_as_errors', default=False) opt.add_option('--enable-coverage', help=("enable options necessary for 
code coverage " "reporting on selftest (default=no)"), action="store_true", dest='enable_coverage', default=False) gr.add_option('--fatal-errors', help=("Stop compilation on first error (enable -Wfatal-errors)"), action="store_true", dest='fatal_errors', default=False) gr.add_option('--enable-gccdeps', help=("Enable use of gcc -MD dependency module"), action="store_true", dest='enable_gccdeps', default=True) gr.add_option('--pedantic', help=("Enable even more compiler warnings"), action='store_true', dest='pedantic', default=False) gr.add_option('--git-local-changes', help=("mark version with + if local git changes"), action='store_true', dest='GIT_LOCAL_CHANGES', default=False) gr.add_option('--address-sanitizer', help=("Enable address sanitizer compile and linker flags"), action="store_true", dest='address_sanitizer', default=False) gr.add_option('--undefined-sanitizer', help=("Enable undefined behaviour sanitizer compile and linker flags"), action="store_true", dest='undefined_sanitizer', default=False) gr.add_option('--enable-clangdb', help=("Enable use of clang_compilation_database"), action="store_true", dest='enable_clangdb', default=False) gr.add_option('--enable-libfuzzer', help=("Build fuzzing binaries (use ADDITIONAL_CFLAGS to specify compiler options for libFuzzer or use CC=honggfuzz/hfuzz-cc)"), action="store_true", dest='enable_libfuzzer', default=False) gr.add_option('--enable-afl-fuzzer', help=("Build fuzzing binaries AFL-style (typically use with CC=afl-gcc)"), action="store_true", dest='enable_afl_fuzzer', default=False) # Fuzz targets may need additional LDFLAGS that we can't use on # internal binaries like asn1_compile gr.add_option('--fuzz-target-ldflags', help=("Linker flags to be used when building fuzz targets"), action="store", dest='FUZZ_TARGET_LDFLAGS', default='') gr.add_option('--abi-check', help=("Check ABI signatures for libraries"), action='store_true', dest='ABI_CHECK', default=False) gr.add_option('--abi-check-disable', help=("Disable ABI checking (used with --enable-developer)"), action='store_true', dest='ABI_CHECK_DISABLE', default=False) gr.add_option('--abi-update', help=("Update ABI signature files for libraries"), action='store_true', dest='ABI_UPDATE', default=False) gr.add_option('--show-deps', help=("Show dependency tree for the given target"), dest='SHOWDEPS', default='') gr.add_option('--symbol-check', help=("check symbols in object files against project rules"), action='store_true', dest='SYMBOLCHECK', default=False) gr.add_option('--dup-symbol-check', help=("check for duplicate symbols in object files and system libs (must be configured with --enable-developer)"), action='store_true', dest='DUP_SYMBOLCHECK', default=False) gr.add_option('--why-needed', help=("TARGET:DEPENDENCY check why TARGET needs DEPENDENCY"), action='store', type='str', dest='WHYNEEDED', default=None) gr.add_option('--show-duplicates', help=("Show objects which are included in multiple binaries or libraries"), action='store_true', dest='SHOW_DUPLICATES', default=False) gr = opt.add_option_group('cross compilation options') gr.add_option('--cross-compile', help=("configure for cross-compilation"), action='store_true', dest='CROSS_COMPILE', default=False) gr.add_option('--cross-execute', help=("command prefix to use for cross-execution in configure"), action='store', dest='CROSS_EXECUTE', default='') gr.add_option('--cross-answers', help=("answers to cross-compilation configuration (auto modified)"), action='store', dest='CROSS_ANSWERS', default='') 
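    # Illustrative note (not part of the option definitions): the cross options
    # above are normally combined on a single configure invocation, e.g.
    #   ./configure --cross-compile \
    #               --cross-execute='qemu-arm -L /usr/arm-linux-gnueabihf' \
    #               --cross-answers=cross-answers.txt
    # where the emulator prefix and the answers file name are hypothetical.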
gr.add_option('--hostcc', help=("set host compiler when cross compiling"), action='store', dest='HOSTCC', default=False) # we use SUPPRESS_HELP for these, as they are ignored, and are there only # to allow existing RPM spec files to work opt.add_option('--build', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_BUILD', default='') opt.add_option('--host', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_HOST', default='') opt.add_option('--target', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_TARGET', default='') opt.add_option('--program-prefix', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_PROGRAM_PREFIX', default='') opt.add_option('--disable-dependency-tracking', help=SUPPRESS_HELP, action='store_true', dest='AUTOCONF_DISABLE_DEPENDENCY_TRACKING', default=False) opt.add_option('--disable-silent-rules', help=SUPPRESS_HELP, action='store_true', dest='AUTOCONF_DISABLE_SILENT_RULES', default=False) gr = opt.option_group('dist options') gr.add_option('--sign-release', help='sign the release tarball created by waf dist', action='store_true', dest='SIGN_RELEASE') gr.add_option('--tag', help='tag release in git at the same time', type='string', action='store', dest='TAG_RELEASE') opt.add_option('--disable-python', help='do not generate python modules', action='store_true', dest='disable_python', default=False) @Utils.run_once def configure(conf): conf.env.hlist = [] conf.env.srcdir = conf.srcnode.abspath() conf.define('SRCDIR', conf.env['srcdir']) conf.SETUP_CONFIGURE_CACHE(Options.options.enable_configure_cache) # load our local waf extensions conf.load('gnu_dirs') conf.load('wafsamba') conf.CHECK_CC_ENV() conf.load('compiler_c') conf.CHECK_STANDARD_LIBPATH() # we need git for 'waf dist' conf.find_program('git', var='GIT') # older gcc versions (< 4.4) does not work with gccdeps, so we have to see if the .d file is generated if Options.options.enable_gccdeps: # stale file removal - the configuration may pick up the old .pyc file p = os.path.join(conf.env.srcdir, 'buildtools/wafsamba/gccdeps.pyc') if os.path.exists(p): os.remove(p) conf.load('gccdeps') # make the install paths available in environment conf.env.LIBDIR = Options.options.LIBDIR or '${PREFIX}/lib' conf.env.BINDIR = Options.options.BINDIR or '${PREFIX}/bin' conf.env.SBINDIR = Options.options.SBINDIR or '${PREFIX}/sbin' conf.env.MODULESDIR = Options.options.MODULESDIR conf.env.PRIVATELIBDIR = Options.options.PRIVATELIBDIR conf.env.BUNDLED_LIBS = Options.options.BUNDLED_LIBS.split(',') conf.env.SYSTEM_LIBS = () conf.env.PRIVATE_LIBS = Options.options.PRIVATE_LIBS.split(',') conf.env.BUILTIN_LIBRARIES = Options.options.BUILTIN_LIBRARIES.split(',') conf.env.NONSHARED_BINARIES = Options.options.NONSHARED_BINARIES.split(',') conf.env.PRIVATE_EXTENSION = Options.options.PRIVATE_EXTENSION conf.env.PRIVATE_EXTENSION_EXCEPTION = Options.options.PRIVATE_EXTENSION_EXCEPTION.split(',') conf.env.CROSS_COMPILE = Options.options.CROSS_COMPILE conf.env.CROSS_EXECUTE = Options.options.CROSS_EXECUTE conf.env.CROSS_ANSWERS = Options.options.CROSS_ANSWERS conf.env.HOSTCC = Options.options.HOSTCC conf.env.AUTOCONF_BUILD = Options.options.AUTOCONF_BUILD conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX conf.env.disable_python = Options.options.disable_python if (conf.env.AUTOCONF_HOST and conf.env.AUTOCONF_BUILD and conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST): Logs.error('ERROR: Mismatch between --build and --host. 
Please use --cross-compile instead') sys.exit(1) if conf.env.AUTOCONF_PROGRAM_PREFIX: Logs.error('ERROR: --program-prefix not supported') sys.exit(1) # enable ABI checking for developers conf.env.ABI_CHECK = Options.options.ABI_CHECK or Options.options.developer if Options.options.ABI_CHECK_DISABLE: conf.env.ABI_CHECK = False try: conf.find_program('gdb', mandatory=True) except: conf.env.ABI_CHECK = False conf.env.enable_coverage = Options.options.enable_coverage if conf.env.enable_coverage: conf.ADD_LDFLAGS('-lgcov', testflags=True) conf.ADD_CFLAGS('--coverage', testflags=True) # disable abi check for coverage, otherwise ld will fail conf.env.ABI_CHECK = False conf.env.GIT_LOCAL_CHANGES = Options.options.GIT_LOCAL_CHANGES conf.CHECK_UNAME() # see if we can compile and run a simple C program conf.CHECK_CODE('printf("hello world")', define='HAVE_SIMPLE_C_PROG', mandatory=True, execute=True, headers='stdio.h', msg='Checking simple C program') # Try to find the right extra flags for -Werror behaviour for f in ["-Werror", # GCC "-errwarn=%all", # Sun Studio "-qhalt=w", # IBM xlc "-w2", # Tru64 ]: if conf.CHECK_CFLAGS([f]): if not 'WERROR_CFLAGS' in conf.env: conf.env['WERROR_CFLAGS'] = [] conf.env['WERROR_CFLAGS'].extend([f]) break # check which compiler/linker flags are needed for rpath support if conf.CHECK_LDFLAGS(['-Wl,-rpath,.']): conf.env['RPATH_ST'] = '-Wl,-rpath,%s' elif conf.CHECK_LDFLAGS(['-Wl,-R,.']): conf.env['RPATH_ST'] = '-Wl,-R,%s' # check for rpath if conf.CHECK_LIBRARY_SUPPORT(rpath=True): support_rpath = True conf.env.RPATH_ON_BUILD = not Options.options.disable_rpath_build conf.env.RPATH_ON_INSTALL = (conf.env.RPATH_ON_BUILD and not Options.options.disable_rpath_install) if not conf.env.PRIVATELIBDIR: conf.env.PRIVATELIBDIR = '%s/%s' % (conf.env.LIBDIR, Context.g_module.APPNAME) conf.env.RPATH_ON_INSTALL_PRIVATE = ( not Options.options.disable_rpath_private_install) else: support_rpath = False conf.env.RPATH_ON_INSTALL = False conf.env.RPATH_ON_BUILD = False conf.env.RPATH_ON_INSTALL_PRIVATE = False if not conf.env.PRIVATELIBDIR: # rpath is not possible so there is no sense in having a # private library directory by default. # the user can of course always override it. 
conf.env.PRIVATELIBDIR = conf.env.LIBDIR if (not Options.options.disable_symbol_versions and conf.CHECK_LIBRARY_SUPPORT(rpath=support_rpath, version_script=True, msg='-Wl,--version-script support')): conf.env.HAVE_LD_VERSION_SCRIPT = True else: conf.env.HAVE_LD_VERSION_SCRIPT = False if conf.CHECK_CFLAGS(['-fvisibility=hidden']): conf.env.VISIBILITY_CFLAGS = '-fvisibility=hidden' conf.CHECK_CODE('''int main(void) { return 0; } __attribute__((visibility("default"))) void vis_foo2(void) {}\n''', cflags=conf.env.VISIBILITY_CFLAGS, strict=True, define='HAVE_VISIBILITY_ATTR', addmain=False) # check HAVE_CONSTRUCTOR_ATTRIBUTE conf.CHECK_CODE(''' void test_constructor_attribute(void) __attribute__ ((constructor)); void test_constructor_attribute(void) { return; } int main(void) { return 0; } ''', 'HAVE_CONSTRUCTOR_ATTRIBUTE', addmain=False, strict=True, msg='Checking for library constructor support') # check HAVE_PRAGMA_INIT alternatively if not conf.env.HAVE_CONSTRUCTOR_ATTRIBUTE: conf.CHECK_CODE(''' #pragma init (test_init) void test_init(void) { return; } int main(void) { return 0; } ''', 'HAVE_PRAGMA_INIT', addmain=False, strict=True, msg='Checking for pragma init support') # check HAVE_DESTRUCTOR_ATTRIBUTE conf.CHECK_CODE(''' void test_destructor_attribute(void) __attribute__ ((destructor)); void test_destructor_attribute(void) { return; } int main(void) { return 0; } ''', 'HAVE_DESTRUCTOR_ATTRIBUTE', addmain=False, strict=True, msg='Checking for library destructor support') # check HAVE_PRAGMA_FINI alternatively if not conf.env.HAVE_DESTRUCTOR_ATTRIBUTE: conf.CHECK_CODE(''' #pragma fini (test_fini) void test_fini(void) { return; } int main(void) { return 0; } ''', 'HAVE_PRAGMA_FINI', addmain=False, strict=True, msg='Checking for pragma fini support') conf.CHECK_CODE(''' void test_attribute(void) __attribute__ (()); void test_attribute(void) { return; } int main(void) { return 0; } ''', 'HAVE___ATTRIBUTE__', addmain=False, strict=True, msg='Checking for __attribute__') # Solaris by defauls uses draft versions of some functions unless you set _POSIX_PTHREAD_SEMANTICS if sys.platform.startswith('sunos'): conf.DEFINE('_POSIX_PTHREAD_SEMANTICS', 1) if sys.platform.startswith('aix'): conf.DEFINE('_ALL_SOURCE', 1, add_to_cflags=True) # Might not be needed if ALL_SOURCE is defined # conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) # we should use the PIC options in waf instead # Some compilo didn't support -fPIC but just print a warning if conf.env['COMPILER_CC'] == "suncc": conf.ADD_CFLAGS('-KPIC', testflags=True) # we really want define here as we need to have this # define even during the tests otherwise detection of # boolean is broken conf.DEFINE('_STDC_C99', 1, add_to_cflags=True) conf.DEFINE('_XPG6', 1, add_to_cflags=True) else: conf.ADD_CFLAGS('-fPIC', testflags=True) # On Solaris 8 with suncc (at least) the flags for the linker to define the name of the # library are not always working (if the command line is very very long and with a lot # files) if conf.env['COMPILER_CC'] == "suncc": save = conf.env['SONAME_ST'] conf.env['SONAME_ST'] = '-Wl,-h,%s' if not conf.CHECK_SHLIB_INTRASINC_NAME_FLAGS("Checking if flags %s are ok" % conf.env['SONAME_ST']): conf.env['SONAME_ST'] = save conf.CHECK_INLINE() # check for pkgconfig conf.CHECK_CFG(atleast_pkgconfig_version='0.0.0') conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True) conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True) # # Needs to be defined before std*.h and string*.h are included # As Python.h already brings string.h we need 
it in CFLAGS. # See memset_s() details here: # https://en.cppreference.com/w/c/string/byte/memset # if conf.CHECK_CFLAGS(['-D__STDC_WANT_LIB_EXT1__=1']): conf.ADD_CFLAGS('-D__STDC_WANT_LIB_EXT1__=1') # on Tru64 certain features are only available with _OSF_SOURCE set to 1 # and _XOPEN_SOURCE set to 600 if conf.env['SYSTEM_UNAME_SYSNAME'] == 'OSF1': conf.DEFINE('_OSF_SOURCE', 1, add_to_cflags=True) conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) # SCM_RIGHTS is only avail if _XOPEN_SOURCE iÑ• defined on IRIX if conf.env['SYSTEM_UNAME_SYSNAME'] == 'IRIX': conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) conf.DEFINE('_BSD_TYPES', 1, add_to_cflags=True) # Try to find the right extra flags for C99 initialisers for f in ["", "-AC99", "-qlanglvl=extc99", "-qlanglvl=stdc99", "-c99"]: if conf.CHECK_CFLAGS([f], ''' struct foo {int x;char y;}; struct foo bar = { .y = 'X', .x = 1 }; '''): if f != "": conf.ADD_CFLAGS(f) break # get the base headers we'll use for the rest of the tests conf.CHECK_HEADERS('stdio.h sys/types.h sys/stat.h stdlib.h stddef.h memory.h string.h', add_headers=True) conf.CHECK_HEADERS('strings.h inttypes.h stdint.h unistd.h minix/config.h', add_headers=True) conf.CHECK_HEADERS('ctype.h', add_headers=True) if sys.platform != 'darwin': conf.CHECK_HEADERS('standards.h', add_headers=True) conf.CHECK_HEADERS('stdbool.h stdint.h stdarg.h vararg.h', add_headers=True) conf.CHECK_HEADERS('limits.h assert.h') # see if we need special largefile flags if not conf.CHECK_LARGEFILE(): raise Errors.WafError('Samba requires large file support support, but not available on this platform: sizeof(off_t) < 8') if conf.env.HAVE_STDDEF_H and conf.env.HAVE_STDLIB_H: conf.DEFINE('STDC_HEADERS', 1) conf.CHECK_HEADERS('sys/time.h time.h', together=True) if conf.env.HAVE_SYS_TIME_H and conf.env.HAVE_TIME_H: conf.DEFINE('TIME_WITH_SYS_TIME', 1) # cope with different extensions for libraries (root, ext) = os.path.splitext(conf.env.cshlib_PATTERN) if ext[0] == '.': conf.define('SHLIBEXT', ext[1:], quote=True) else: conf.define('SHLIBEXT', "so", quote=True) # First try a header check for cross-compile friendlyness conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER #define B __BYTE_ORDER #elif defined(BYTE_ORDER) #define B BYTE_ORDER #endif #ifdef __LITTLE_ENDIAN #define LITTLE __LITTLE_ENDIAN #elif defined(LITTLE_ENDIAN) #define LITTLE LITTLE_ENDIAN #endif #if !defined(LITTLE) || !defined(B) || LITTLE != B #error Not little endian. #endif int main(void) { return 0; }\n""", addmain=False, headers="endian.h sys/endian.h", define="HAVE_LITTLE_ENDIAN") conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER #define B __BYTE_ORDER #elif defined(BYTE_ORDER) #define B BYTE_ORDER #endif #ifdef __BIG_ENDIAN #define BIG __BIG_ENDIAN #elif defined(BIG_ENDIAN) #define BIG BIG_ENDIAN #endif #if !defined(BIG) || !defined(B) || BIG != B #error Not big endian. #endif int main(void) { return 0; }\n""", addmain=False, headers="endian.h sys/endian.h", define="HAVE_BIG_ENDIAN") if not conf.CONFIG_SET("HAVE_BIG_ENDIAN") and not conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): # That didn't work! Do runtime test. conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; u.i = 0x01020304; return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;""", addmain=True, execute=True, define='HAVE_LITTLE_ENDIAN', msg="Checking for HAVE_LITTLE_ENDIAN - runtime") conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; u.i = 0x01020304; return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 
0 : 1;""", addmain=True, execute=True, define='HAVE_BIG_ENDIAN', msg="Checking for HAVE_BIG_ENDIAN - runtime") # Extra sanity check. if conf.CONFIG_SET("HAVE_BIG_ENDIAN") == conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): Logs.error("Failed endian determination. The PDP-11 is back?") sys.exit(1) else: if conf.CONFIG_SET("HAVE_BIG_ENDIAN"): conf.DEFINE('WORDS_BIGENDIAN', 1) # check if signal() takes a void function if conf.CHECK_CODE('return *(signal (0, 0)) (0) == 1', define='RETSIGTYPE_INT', execute=False, headers='signal.h', msg='Checking if signal handlers return int'): conf.DEFINE('RETSIGTYPE', 'int') else: conf.DEFINE('RETSIGTYPE', 'void') conf.CHECK_VARIABLE('__FUNCTION__', define='HAVE_FUNCTION_MACRO') conf.CHECK_CODE('va_list ap1,ap2; va_copy(ap1,ap2)', define="HAVE_VA_COPY", msg="Checking for va_copy") conf.CHECK_CODE(''' #define eprintf(...) fprintf(stderr, __VA_ARGS__) eprintf("bla", "bar") ''', define='HAVE__VA_ARGS__MACRO') conf.env.enable_fuzzing = False conf.env.enable_libfuzzer = Options.options.enable_libfuzzer conf.env.enable_afl_fuzzer = Options.options.enable_afl_fuzzer if conf.env.enable_libfuzzer or conf.env.enable_afl_fuzzer: conf.env.enable_fuzzing = True conf.DEFINE('FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION', 1) conf.env.FUZZ_TARGET_LDFLAGS = Options.options.FUZZ_TARGET_LDFLAGS conf.env.enable_clangdb = Options.options.enable_clangdb if conf.env.enable_clangdb: conf.load('clang_compilation_database') # Create a symlink of the compile db for clangd symlink(os.path.join(conf.bldnode.abspath(), 'default/compile_commands.json'), os.path.join(conf.srcnode.abspath(), 'compile_commands.json'), force=True) conf.SAMBA_BUILD_ENV() def build(bld): # give a more useful message if the source directory has moved curdir = bld.path.abspath() srcdir = bld.srcnode.abspath() relpath = os.path.relpath(curdir, srcdir) if relpath.find('../') != -1: Logs.error('bld.path %s is not a child of %s' % (curdir, srcdir)) raise Errors.WafError('''The top source directory has moved. Please run distclean and reconfigure''') bld.SETUP_BUILD_GROUPS() bld.ENFORCE_GROUP_ORDERING() bld.CHECK_PROJECT_RULES() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0092094 tevent-0.11.0/third_party/cmocka/cmocka.c0000660000000000000000000033537300000000000020240 0ustar00rootroot00000000000000/* * Copyright 2008 Google Inc. * Copyright 2014-2018 Andreas Schneider * Copyright 2015 Jakub Hrozek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #ifdef HAVE_MALLOC_H #include #endif #ifdef HAVE_INTTYPES_H #include #endif #ifdef HAVE_SIGNAL_H #include #endif #ifdef HAVE_STRINGS_H #include #endif #include #include #include #include #include #include #include #include /* * This allows to add a platform specific header file. Some embedded platforms * sometimes miss certain types and definitions. 
* * Example: * * typedef unsigned long int uintptr_t * #define _UINTPTR_T 1 * #define _UINTPTR_T_DEFINED 1 */ #ifdef CMOCKA_PLATFORM_INCLUDE # include "cmocka_platform.h" #endif /* CMOCKA_PLATFORM_INCLUDE */ #include #include /* Size of guard bytes around dynamically allocated blocks. */ #define MALLOC_GUARD_SIZE 16 /* Pattern used to initialize guard blocks. */ #define MALLOC_GUARD_PATTERN 0xEF /* Pattern used to initialize memory allocated with test_malloc(). */ #define MALLOC_ALLOC_PATTERN 0xBA #define MALLOC_FREE_PATTERN 0xCD /* Alignment of allocated blocks. NOTE: This must be base2. */ #define MALLOC_ALIGNMENT sizeof(size_t) /* Printf formatting for source code locations. */ #define SOURCE_LOCATION_FORMAT "%s:%u" #if defined(HAVE_GCC_THREAD_LOCAL_STORAGE) # define CMOCKA_THREAD __thread #elif defined(HAVE_MSVC_THREAD_LOCAL_STORAGE) # define CMOCKA_THREAD __declspec(thread) #else # define CMOCKA_THREAD #endif #ifdef HAVE_CLOCK_REALTIME #define CMOCKA_CLOCK_GETTIME(clock_id, ts) clock_gettime((clock_id), (ts)) #else #define CMOCKA_CLOCK_GETTIME(clock_id, ts) #endif #ifndef MAX #define MAX(a,b) ((a) < (b) ? (b) : (a)) #endif /** * POSIX has sigsetjmp/siglongjmp, while Windows only has setjmp/longjmp. */ #ifdef HAVE_SIGLONGJMP # define cm_jmp_buf sigjmp_buf # define cm_setjmp(env) sigsetjmp(env, 1) # define cm_longjmp(env, val) siglongjmp(env, val) #else # define cm_jmp_buf jmp_buf # define cm_setjmp(env) setjmp(env) # define cm_longjmp(env, val) longjmp(env, val) #endif /* * Declare and initialize the pointer member of ValuePointer variable name * with ptr. */ #define declare_initialize_value_pointer_pointer(name, ptr) \ ValuePointer name ; \ name.value = 0; \ name.x.pointer = (void*)(ptr) /* * Declare and initialize the value member of ValuePointer variable name * with val. */ #define declare_initialize_value_pointer_value(name, val) \ ValuePointer name ; \ name.value = val /* Cast a LargestIntegralType to pointer_type via a ValuePointer. */ #define cast_largest_integral_type_to_pointer( \ pointer_type, largest_integral_type) \ ((pointer_type)((ValuePointer*)&(largest_integral_type))->x.pointer) /* Used to cast LargetIntegralType to void* and vice versa. */ typedef union ValuePointer { LargestIntegralType value; struct { #if defined(WORDS_BIGENDIAN) && (WORDS_SIZEOF_VOID_P == 4) unsigned int padding; #endif void *pointer; } x; } ValuePointer; /* Doubly linked list node. */ typedef struct ListNode { const void *value; int refcount; struct ListNode *next; struct ListNode *prev; } ListNode; /* Debug information for malloc(). */ struct MallocBlockInfoData { void* block; /* Address of the block returned by malloc(). */ size_t allocated_size; /* Total size of the allocated block. */ size_t size; /* Request block size. */ SourceLocation location; /* Where the block was allocated. */ ListNode node; /* Node within list of all allocated blocks. */ }; typedef union { struct MallocBlockInfoData *data; char *ptr; } MallocBlockInfo; /* State of each test. */ typedef struct TestState { const ListNode *check_point; /* Check point of the test if there's a */ /* setup function. */ void *state; /* State associated with the test. */ } TestState; /* Determines whether two values are the same. */ typedef int (*EqualityFunction)(const void *left, const void *right); /* Value of a symbol and the place it was declared. */ typedef struct SymbolValue { SourceLocation location; LargestIntegralType value; } SymbolValue; /* * Contains a list of values for a symbol. 
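 *
 * Sketch of the resulting hierarchy (function and parameter names are
 * hypothetical, inferred from how add_symbol_value() is called below):
 *
 *   global_function_result_map_head
 *     -> SymbolMapValue("my_func") -> [ SymbolValue, SymbolValue, ... ]
 *   global_function_parameter_map_head
 *     -> SymbolMapValue("my_func") -> SymbolMapValue("param")
 *          -> [ CheckParameterEvent, ... ]
 *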
* NOTE: Each structure referenced by symbol_values_list_head must have a * SourceLocation as its' first member. */ typedef struct SymbolMapValue { const char *symbol_name; ListNode symbol_values_list_head; } SymbolMapValue; /* Where a particular ordering was located and its symbol name */ typedef struct FuncOrderingValue { SourceLocation location; const char * function; } FuncOrderingValue; /* Used by list_free() to deallocate values referenced by list nodes. */ typedef void (*CleanupListValue)(const void *value, void *cleanup_value_data); /* Structure used to check the range of integer types.a */ typedef struct CheckIntegerRange { CheckParameterEvent event; LargestIntegralType minimum; LargestIntegralType maximum; } CheckIntegerRange; /* Structure used to check whether an integer value is in a set. */ typedef struct CheckIntegerSet { CheckParameterEvent event; const LargestIntegralType *set; size_t size_of_set; } CheckIntegerSet; /* Used to check whether a parameter matches the area of memory referenced by * this structure. */ typedef struct CheckMemoryData { CheckParameterEvent event; const void *memory; size_t size; } CheckMemoryData; static ListNode* list_initialize(ListNode * const node); static ListNode* list_add(ListNode * const head, ListNode *new_node); static ListNode* list_add_value(ListNode * const head, const void *value, const int count); static ListNode* list_remove( ListNode * const node, const CleanupListValue cleanup_value, void * const cleanup_value_data); static void list_remove_free( ListNode * const node, const CleanupListValue cleanup_value, void * const cleanup_value_data); static int list_empty(const ListNode * const head); static int list_find( ListNode * const head, const void *value, const EqualityFunction equal_func, ListNode **output); static int list_first(ListNode * const head, ListNode **output); static ListNode* list_free( ListNode * const head, const CleanupListValue cleanup_value, void * const cleanup_value_data); static void add_symbol_value( ListNode * const symbol_map_head, const char * const symbol_names[], const size_t number_of_symbol_names, const void* value, const int count); static int get_symbol_value( ListNode * const symbol_map_head, const char * const symbol_names[], const size_t number_of_symbol_names, void **output); static void free_value(const void *value, void *cleanup_value_data); static void free_symbol_map_value( const void *value, void *cleanup_value_data); static void remove_always_return_values(ListNode * const map_head, const size_t number_of_symbol_names); static size_t check_for_leftover_values_list(const ListNode * head, const char * const error_message); static size_t check_for_leftover_values( const ListNode * const map_head, const char * const error_message, const size_t number_of_symbol_names); static void remove_always_return_values_from_list(ListNode * const map_head); /* * This must be called at the beginning of a test to initialize some data * structures. */ static void initialize_testing(const char *test_name); /* This must be called at the end of a test to free() allocated structures. */ static void teardown_testing(const char *test_name); static enum cm_message_output cm_get_output(void); static int cm_error_message_enabled = 1; static CMOCKA_THREAD char *cm_error_message; void cm_print_error(const char * const format, ...) CMOCKA_PRINTF_ATTRIBUTE(1, 2); /* * Keeps track of the calling context returned by setenv() so that the fail() * method can jump out of a test. 
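 * (Concretely, this is the buffer filled by cm_setjmp(), i.e. sigsetjmp() or
 * setjmp() depending on the platform, which exit_test() unwinds again with
 * cm_longjmp().)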
*/ static CMOCKA_THREAD cm_jmp_buf global_run_test_env; static CMOCKA_THREAD int global_running_test = 0; /* Keeps track of the calling context returned by setenv() so that */ /* mock_assert() can optionally jump back to expect_assert_failure(). */ jmp_buf global_expect_assert_env; int global_expecting_assert = 0; const char *global_last_failed_assert = NULL; static int global_skip_test; /* Keeps a map of the values that functions will have to return to provide */ /* mocked interfaces. */ static CMOCKA_THREAD ListNode global_function_result_map_head; /* Location of the last mock value returned was declared. */ static CMOCKA_THREAD SourceLocation global_last_mock_value_location; /* Keeps a map of the values that functions expect as parameters to their * mocked interfaces. */ static CMOCKA_THREAD ListNode global_function_parameter_map_head; /* Location of last parameter value checked was declared. */ static CMOCKA_THREAD SourceLocation global_last_parameter_location; /* List (acting as FIFO) of call ordering. */ static CMOCKA_THREAD ListNode global_call_ordering_head; /* Location of last call ordering that was declared. */ static CMOCKA_THREAD SourceLocation global_last_call_ordering_location; /* List of all currently allocated blocks. */ static CMOCKA_THREAD ListNode global_allocated_blocks; static enum cm_message_output global_msg_output = CM_OUTPUT_STDOUT; static const char *global_test_filter_pattern; #ifndef _WIN32 /* Signals caught by exception_handler(). */ static const int exception_signals[] = { SIGFPE, SIGILL, SIGSEGV, #ifdef SIGBUS SIGBUS, #endif #ifdef SIGSYS SIGSYS, #endif }; /* Default signal functions that should be restored after a test is complete. */ typedef void (*SignalFunction)(int signal); static SignalFunction default_signal_functions[ ARRAY_SIZE(exception_signals)]; #else /* _WIN32 */ /* The default exception filter. */ static LPTOP_LEVEL_EXCEPTION_FILTER previous_exception_filter; /* Fatal exceptions. */ typedef struct ExceptionCodeInfo { DWORD code; const char* description; } ExceptionCodeInfo; #define EXCEPTION_CODE_INFO(exception_code) {exception_code, #exception_code} static const ExceptionCodeInfo exception_codes[] = { EXCEPTION_CODE_INFO(EXCEPTION_ACCESS_VIOLATION), EXCEPTION_CODE_INFO(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), EXCEPTION_CODE_INFO(EXCEPTION_DATATYPE_MISALIGNMENT), EXCEPTION_CODE_INFO(EXCEPTION_FLT_DENORMAL_OPERAND), EXCEPTION_CODE_INFO(EXCEPTION_FLT_DIVIDE_BY_ZERO), EXCEPTION_CODE_INFO(EXCEPTION_FLT_INEXACT_RESULT), EXCEPTION_CODE_INFO(EXCEPTION_FLT_INVALID_OPERATION), EXCEPTION_CODE_INFO(EXCEPTION_FLT_OVERFLOW), EXCEPTION_CODE_INFO(EXCEPTION_FLT_STACK_CHECK), EXCEPTION_CODE_INFO(EXCEPTION_FLT_UNDERFLOW), EXCEPTION_CODE_INFO(EXCEPTION_GUARD_PAGE), EXCEPTION_CODE_INFO(EXCEPTION_ILLEGAL_INSTRUCTION), EXCEPTION_CODE_INFO(EXCEPTION_INT_DIVIDE_BY_ZERO), EXCEPTION_CODE_INFO(EXCEPTION_INT_OVERFLOW), EXCEPTION_CODE_INFO(EXCEPTION_INVALID_DISPOSITION), EXCEPTION_CODE_INFO(EXCEPTION_INVALID_HANDLE), EXCEPTION_CODE_INFO(EXCEPTION_IN_PAGE_ERROR), EXCEPTION_CODE_INFO(EXCEPTION_NONCONTINUABLE_EXCEPTION), EXCEPTION_CODE_INFO(EXCEPTION_PRIV_INSTRUCTION), EXCEPTION_CODE_INFO(EXCEPTION_STACK_OVERFLOW), }; #endif /* !_WIN32 */ enum CMUnitTestStatus { CM_TEST_NOT_STARTED, CM_TEST_PASSED, CM_TEST_FAILED, CM_TEST_ERROR, CM_TEST_SKIPPED, }; struct CMUnitTestState { const ListNode *check_point; /* Check point of the test if there's a setup function. 
*/ const struct CMUnitTest *test; /* Point to array element in the tests we get passed */ void *state; /* State associated with the test */ const char *error_message; /* The error messages by the test */ enum CMUnitTestStatus status; /* PASSED, FAILED, ABORT ... */ double runtime; /* Time calculations */ }; /* Exit the currently executing test. */ static void exit_test(const int quit_application) { const char *env = getenv("CMOCKA_TEST_ABORT"); int abort_test = 0; if (env != NULL && strlen(env) == 1) { abort_test = (env[0] == '1'); } if (global_skip_test == 0 && abort_test == 1) { print_error("%s", cm_error_message); abort(); } else if (global_running_test) { cm_longjmp(global_run_test_env, 1); } else if (quit_application) { exit(-1); } } void _skip(const char * const file, const int line) { cm_print_error(SOURCE_LOCATION_FORMAT ": Skipped!\n", file, line); global_skip_test = 1; exit_test(1); } /* Initialize a SourceLocation structure. */ static void initialize_source_location(SourceLocation * const location) { assert_non_null(location); location->file = NULL; location->line = 0; } /* Determine whether a source location is currently set. */ static int source_location_is_set(const SourceLocation * const location) { assert_non_null(location); return location->file && location->line; } /* Set a source location. */ static void set_source_location( SourceLocation * const location, const char * const file, const int line) { assert_non_null(location); location->file = file; location->line = line; } static int c_strreplace(char *src, size_t src_len, const char *pattern, const char *repl, int *str_replaced) { char *p = NULL; p = strstr(src, pattern); if (p == NULL) { return -1; } do { size_t of = p - src; size_t l = strlen(src); size_t pl = strlen(pattern); size_t rl = strlen(repl); /* overflow check */ if (src_len <= l + MAX(pl, rl) + 1) { return -1; } if (rl != pl) { memmove(src + of + rl, src + of + pl, l - of - pl + 1); } memcpy(src + of, repl, rl); if (str_replaced != NULL) { *str_replaced = 1; } p = strstr(src, pattern); } while (p != NULL); return 0; } static int c_strmatch(const char *str, const char *pattern) { int ok; if (str == NULL || pattern == NULL) { return 0; } for (;;) { /* Check if pattern is done */ if (*pattern == '\0') { /* If string is at the end, we're good */ if (*str == '\0') { return 1; } return 0; } if (*pattern == '*') { /* Move on */ pattern++; /* If we are at the end, everything is fine */ if (*pattern == '\0') { return 1; } /* Try to match each position */ for (; *str != '\0'; str++) { ok = c_strmatch(str, pattern); if (ok) { return 1; } } /* No match */ return 0; } /* If we are at the end, leave */ if (*str == '\0') { return 0; } /* Check if we have a single wildcard or matching char */ if (*pattern != '?' && *str != *pattern) { return 0; } /* Move string and pattern */ str++; pattern++; } return 0; } /* Create function results and expected parameter lists. 
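 * (As noted in the forward declaration above, this runs at the start of each
 * test; teardown_testing() below releases the same three global lists when
 * the test finishes.)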
*/ void initialize_testing(const char *test_name) { (void)test_name; list_initialize(&global_function_result_map_head); initialize_source_location(&global_last_mock_value_location); list_initialize(&global_function_parameter_map_head); initialize_source_location(&global_last_parameter_location); list_initialize(&global_call_ordering_head); initialize_source_location(&global_last_parameter_location); } static void fail_if_leftover_values(const char *test_name) { int error_occurred = 0; (void)test_name; remove_always_return_values(&global_function_result_map_head, 1); if (check_for_leftover_values( &global_function_result_map_head, "%s() has remaining non-returned values.\n", 1)) { error_occurred = 1; } remove_always_return_values(&global_function_parameter_map_head, 2); if (check_for_leftover_values( &global_function_parameter_map_head, "'%s' parameter still has values that haven't been checked.\n", 2)) { error_occurred = 1; } remove_always_return_values_from_list(&global_call_ordering_head); if (check_for_leftover_values_list(&global_call_ordering_head, "%s function was expected to be called but was not not.\n")) { error_occurred = 1; } if (error_occurred) { exit_test(1); } } static void teardown_testing(const char *test_name) { (void)test_name; list_free(&global_function_result_map_head, free_symbol_map_value, (void*)0); initialize_source_location(&global_last_mock_value_location); list_free(&global_function_parameter_map_head, free_symbol_map_value, (void*)1); initialize_source_location(&global_last_parameter_location); list_free(&global_call_ordering_head, free_value, (void*)0); initialize_source_location(&global_last_call_ordering_location); } /* Initialize a list node. */ static ListNode* list_initialize(ListNode * const node) { node->value = NULL; node->next = node; node->prev = node; node->refcount = 1; return node; } /* * Adds a value at the tail of a given list. * The node referencing the value is allocated from the heap. */ static ListNode* list_add_value(ListNode * const head, const void *value, const int refcount) { ListNode * const new_node = (ListNode*)malloc(sizeof(ListNode)); assert_non_null(head); assert_non_null(value); new_node->value = value; new_node->refcount = refcount; return list_add(head, new_node); } /* Add new_node to the end of the list. */ static ListNode* list_add(ListNode * const head, ListNode *new_node) { assert_non_null(head); assert_non_null(new_node); new_node->next = head; new_node->prev = head->prev; head->prev->next = new_node; head->prev = new_node; return new_node; } /* Remove a node from a list. */ static ListNode* list_remove( ListNode * const node, const CleanupListValue cleanup_value, void * const cleanup_value_data) { assert_non_null(node); node->prev->next = node->next; node->next->prev = node->prev; if (cleanup_value) { cleanup_value(node->value, cleanup_value_data); } return node; } /* Remove a list node from a list and free the node. */ static void list_remove_free( ListNode * const node, const CleanupListValue cleanup_value, void * const cleanup_value_data) { assert_non_null(node); free(list_remove(node, cleanup_value, cleanup_value_data)); } /* * Frees memory kept by a linked list The cleanup_value function is called for * every "value" field of nodes in the list, except for the head. In addition * to each list value, cleanup_value_data is passed to each call to * cleanup_value. The head of the list is not deallocated. 
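 *
 * Minimal usage sketch (illustrative only, not taken from the test code):
 *
 *   ListNode head;
 *   list_initialize(&head);
 *   list_add_value(&head, malloc(16), 1);  // heap value now owned by the list
 *   list_free(&head, free_value, NULL);    // free_value() releases it; head stays
 *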
*/ static ListNode* list_free( ListNode * const head, const CleanupListValue cleanup_value, void * const cleanup_value_data) { assert_non_null(head); while (!list_empty(head)) { list_remove_free(head->next, cleanup_value, cleanup_value_data); } return head; } /* Determine whether a list is empty. */ static int list_empty(const ListNode * const head) { assert_non_null(head); return head->next == head; } /* * Find a value in the list using the equal_func to compare each node with the * value. */ static int list_find(ListNode * const head, const void *value, const EqualityFunction equal_func, ListNode **output) { ListNode *current; assert_non_null(head); for (current = head->next; current != head; current = current->next) { if (equal_func(current->value, value)) { *output = current; return 1; } } return 0; } /* Returns the first node of a list */ static int list_first(ListNode * const head, ListNode **output) { ListNode *target_node = NULL; assert_non_null(head); if (list_empty(head)) { return 0; } target_node = head->next; *output = target_node; return 1; } /* Deallocate a value referenced by a list. */ static void free_value(const void *value, void *cleanup_value_data) { (void)cleanup_value_data; assert_non_null(value); free((void*)value); } /* Releases memory associated to a symbol_map_value. */ static void free_symbol_map_value(const void *value, void *cleanup_value_data) { SymbolMapValue * const map_value = (SymbolMapValue*)value; const LargestIntegralType children = cast_ptr_to_largest_integral_type(cleanup_value_data); assert_non_null(value); list_free(&map_value->symbol_values_list_head, children ? free_symbol_map_value : free_value, (void *) ((uintptr_t)children - 1)); free(map_value); } /* * Determine whether a symbol name referenced by a symbol_map_value matches the * specified function name. */ static int symbol_names_match(const void *map_value, const void *symbol) { return !strcmp(((SymbolMapValue*)map_value)->symbol_name, (const char*)symbol); } /* * Adds a value to the queue of values associated with the given hierarchy of * symbols. It's assumed value is allocated from the heap. */ static void add_symbol_value(ListNode * const symbol_map_head, const char * const symbol_names[], const size_t number_of_symbol_names, const void* value, const int refcount) { const char* symbol_name; ListNode *target_node; SymbolMapValue *target_map_value; assert_non_null(symbol_map_head); assert_non_null(symbol_names); assert_true(number_of_symbol_names); symbol_name = symbol_names[0]; if (!list_find(symbol_map_head, symbol_name, symbol_names_match, &target_node)) { SymbolMapValue * const new_symbol_map_value = (SymbolMapValue*)malloc(sizeof(*new_symbol_map_value)); new_symbol_map_value->symbol_name = symbol_name; list_initialize(&new_symbol_map_value->symbol_values_list_head); target_node = list_add_value(symbol_map_head, new_symbol_map_value, 1); } target_map_value = (SymbolMapValue*)target_node->value; if (number_of_symbol_names == 1) { list_add_value(&target_map_value->symbol_values_list_head, value, refcount); } else { add_symbol_value(&target_map_value->symbol_values_list_head, &symbol_names[1], number_of_symbol_names - 1, value, refcount); } } /* * Gets the next value associated with the given hierarchy of symbols. * The value is returned as an output parameter with the function returning the * node's old refcount value if a value is found, 0 otherwise. This means that * a return value of 1 indicates the node was just removed from the list. 
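 *
 * (Worked example of the refcount convention: a value queued with count 2,
 * e.g. via will_return_count(), is stored with refcount 2. The first lookup
 * returns 2 and decrements the node; the second returns 1 and removes it.
 * "Always" entries use negative refcounts and are never removed by this
 * function.)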
*/ static int get_symbol_value( ListNode * const head, const char * const symbol_names[], const size_t number_of_symbol_names, void **output) { const char* symbol_name = NULL; ListNode *target_node = NULL; assert_non_null(head); assert_non_null(symbol_names); assert_true(number_of_symbol_names); assert_non_null(output); symbol_name = symbol_names[0]; if (list_find(head, symbol_name, symbol_names_match, &target_node)) { SymbolMapValue *map_value = NULL; ListNode *child_list = NULL; int return_value = 0; assert_non_null(target_node); assert_non_null(target_node->value); map_value = (SymbolMapValue*)target_node->value; child_list = &map_value->symbol_values_list_head; if (number_of_symbol_names == 1) { ListNode *value_node = NULL; return_value = list_first(child_list, &value_node); assert_true(return_value); /* Add a check to silence clang analyzer */ if (return_value == 0) { goto out; } *output = (void*) value_node->value; return_value = value_node->refcount; if (value_node->refcount - 1 == 0) { list_remove_free(value_node, NULL, NULL); } else if (value_node->refcount > WILL_RETURN_ONCE) { --value_node->refcount; } } else { return_value = get_symbol_value( child_list, &symbol_names[1], number_of_symbol_names - 1, output); } if (list_empty(child_list)) { list_remove_free(target_node, free_symbol_map_value, (void*)0); } return return_value; } out: cm_print_error("No entries for symbol %s.\n", symbol_name); return 0; } /** * Taverse a list of nodes and remove first symbol value in list that has a * refcount < -1 (i.e. should always be returned and has been returned at * least once). */ static void remove_always_return_values_from_list(ListNode * const map_head) { ListNode * current = NULL; ListNode * next = NULL; assert_non_null(map_head); for (current = map_head->next, next = current->next; current != map_head; current = next, next = current->next) { if (current->refcount < -1) { list_remove_free(current, free_value, NULL); } } } /* * Traverse down a tree of symbol values and remove the first symbol value * in each branch that has a refcount < -1 (i.e should always be returned * and has been returned at least once). */ static void remove_always_return_values(ListNode * const map_head, const size_t number_of_symbol_names) { ListNode *current; assert_non_null(map_head); assert_true(number_of_symbol_names); current = map_head->next; while (current != map_head) { SymbolMapValue * const value = (SymbolMapValue*)current->value; ListNode * const next = current->next; ListNode *child_list; assert_non_null(value); child_list = &value->symbol_values_list_head; if (!list_empty(child_list)) { if (number_of_symbol_names == 1) { ListNode * const child_node = child_list->next; /* If this item has been returned more than once, free it. 
*/ if (child_node->refcount < -1) { list_remove_free(child_node, free_value, NULL); } } else { remove_always_return_values(child_list, number_of_symbol_names - 1); } } if (list_empty(child_list)) { list_remove_free(current, free_value, NULL); } current = next; } } static size_t check_for_leftover_values_list(const ListNode * head, const char * const error_message) { ListNode *child_node; size_t leftover_count = 0; if (!list_empty(head)) { for (child_node = head->next; child_node != head; child_node = child_node->next, ++leftover_count) { const FuncOrderingValue *const o = (const FuncOrderingValue*) child_node->value; cm_print_error(error_message, o->function); cm_print_error(SOURCE_LOCATION_FORMAT ": note: remaining item was declared here\n", o->location.file, o->location.line); } } return leftover_count; } /* * Checks if there are any leftover values set up by the test that were never * retrieved through execution, and fail the test if that is the case. */ static size_t check_for_leftover_values( const ListNode * const map_head, const char * const error_message, const size_t number_of_symbol_names) { const ListNode *current; size_t symbols_with_leftover_values = 0; assert_non_null(map_head); assert_true(number_of_symbol_names); for (current = map_head->next; current != map_head; current = current->next) { const SymbolMapValue * const value = (SymbolMapValue*)current->value; const ListNode *child_list; assert_non_null(value); child_list = &value->symbol_values_list_head; if (!list_empty(child_list)) { if (number_of_symbol_names == 1) { const ListNode *child_node; cm_print_error(error_message, value->symbol_name); for (child_node = child_list->next; child_node != child_list; child_node = child_node->next) { const SourceLocation * const location = (const SourceLocation*)child_node->value; cm_print_error(SOURCE_LOCATION_FORMAT ": note: remaining item was declared here\n", location->file, location->line); } } else { cm_print_error("%s: ", value->symbol_name); check_for_leftover_values(child_list, error_message, number_of_symbol_names - 1); } symbols_with_leftover_values ++; } } return symbols_with_leftover_values; } /* Get the next return value for the specified mock function. 
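 *
 * Typical use is through the public mock()/will_return() macros; a minimal
 * sketch (the function names below are hypothetical, not from this file):
 *
 *   int mocked_recv(void)
 *   {
 *       return (int)mock();              // pops the next queued return value
 *   }
 *
 *   static void test_recv(void **state)
 *   {
 *       (void)state;
 *       will_return(mocked_recv, 42);    // queue one value for mocked_recv()
 *       assert_int_equal(mocked_recv(), 42);
 *   }
 *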
*/ LargestIntegralType _mock(const char * const function, const char* const file, const int line) { void *result; const int rc = get_symbol_value(&global_function_result_map_head, &function, 1, &result); if (rc) { SymbolValue * const symbol = (SymbolValue*)result; const LargestIntegralType value = symbol->value; global_last_mock_value_location = symbol->location; if (rc == 1) { free(symbol); } return value; } else { cm_print_error(SOURCE_LOCATION_FORMAT ": error: Could not get value " "to mock function %s\n", file, line, function); if (source_location_is_set(&global_last_mock_value_location)) { cm_print_error(SOURCE_LOCATION_FORMAT ": note: Previously returned mock value was declared here\n", global_last_mock_value_location.file, global_last_mock_value_location.line); } else { cm_print_error("There were no previously returned mock values for " "this test.\n"); } exit_test(1); } return 0; } /* Ensure that function is being called in proper order */ void _function_called(const char *const function, const char *const file, const int line) { ListNode *first_value_node = NULL; ListNode *value_node = NULL; int rc; rc = list_first(&global_call_ordering_head, &value_node); first_value_node = value_node; if (rc) { FuncOrderingValue *expected_call; int cmp; expected_call = (FuncOrderingValue *)value_node->value; cmp = strcmp(expected_call->function, function); if (value_node->refcount < -1) { /* * Search through value nodes until either function is found or * encounter a non-zero refcount greater than -2 */ if (cmp != 0) { value_node = value_node->next; expected_call = (FuncOrderingValue *)value_node->value; cmp = strcmp(expected_call->function, function); while (value_node->refcount < -1 && cmp != 0 && value_node != first_value_node->prev) { value_node = value_node->next; if (value_node == NULL) { break; } expected_call = (FuncOrderingValue *)value_node->value; if (expected_call == NULL) { continue; } cmp = strcmp(expected_call->function, function); } if (expected_call == NULL || value_node == first_value_node->prev) { cm_print_error(SOURCE_LOCATION_FORMAT ": error: No expected mock calls matching " "called() invocation in %s", file, line, function); exit_test(1); } } } if (cmp == 0) { if (value_node->refcount > -2 && --value_node->refcount == 0) { list_remove_free(value_node, free_value, NULL); } } else { cm_print_error(SOURCE_LOCATION_FORMAT ": error: Expected call to %s but received called() " "in %s\n", file, line, expected_call->function, function); exit_test(1); } } else { cm_print_error(SOURCE_LOCATION_FORMAT ": error: No mock calls expected but called() was " "invoked in %s\n", file, line, function); exit_test(1); } } /* Add a return value for the specified mock function name. */ void _will_return(const char * const function_name, const char * const file, const int line, const LargestIntegralType value, const int count) { SymbolValue * const return_value = (SymbolValue*)malloc(sizeof(*return_value)); assert_true(count != 0); return_value->value = value; set_source_location(&return_value->location, file, line); add_symbol_value(&global_function_result_map_head, &function_name, 1, return_value, count); } /* * Add a custom parameter checking function. If the event parameter is NULL * the event structure is allocated internally by this function. If event * parameter is provided it must be allocated on the heap and doesn't need to * be deallocated by the caller. 
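 *
 * Sketch of the public expect_check()/check_expected() pairing (illustrative;
 * my_check() and send_bytes() are hypothetical names):
 *
 *   static int my_check(const LargestIntegralType value,
 *                       const LargestIntegralType check_data)
 *   {
 *       return value == check_data;      // non-zero means the parameter passed
 *   }
 *
 *   expect_check(send_bytes, len, my_check, 16);
 *   // ... and inside the mocked send_bytes(size_t len): check_expected(len);
 *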
*/ void _expect_check( const char* const function, const char* const parameter, const char* const file, const int line, const CheckParameterValue check_function, const LargestIntegralType check_data, CheckParameterEvent * const event, const int count) { CheckParameterEvent * const check = event ? event : (CheckParameterEvent*)malloc(sizeof(*check)); const char* symbols[] = {function, parameter}; check->parameter_name = parameter; check->check_value = check_function; check->check_value_data = check_data; set_source_location(&check->location, file, line); add_symbol_value(&global_function_parameter_map_head, symbols, 2, check, count); } /* * Add an call expectations that a particular function is called correctly. * This is used for code under test that makes calls to several functions * in depended upon components (mocks). */ void _expect_function_call( const char * const function_name, const char * const file, const int line, const int count) { FuncOrderingValue *ordering; assert_non_null(function_name); assert_non_null(file); assert_true(count != 0); ordering = (FuncOrderingValue *)malloc(sizeof(*ordering)); set_source_location(&ordering->location, file, line); ordering->function = function_name; list_add_value(&global_call_ordering_head, ordering, count); } /* Returns 1 if the specified values are equal. If the values are not equal * an error is displayed and 0 is returned. */ static int values_equal_display_error(const LargestIntegralType left, const LargestIntegralType right) { const int equal = left == right; if (!equal) { cm_print_error(LargestIntegralTypePrintfFormat " != " LargestIntegralTypePrintfFormat "\n", left, right); } return equal; } /* * Returns 1 if the specified values are not equal. If the values are equal * an error is displayed and 0 is returned. */ static int values_not_equal_display_error(const LargestIntegralType left, const LargestIntegralType right) { const int not_equal = left != right; if (!not_equal) { cm_print_error(LargestIntegralTypePrintfFormat " == " LargestIntegralTypePrintfFormat "\n", left, right); } return not_equal; } /* * Determine whether value is contained within check_integer_set. * If invert is 0 and the value is in the set 1 is returned, otherwise 0 is * returned and an error is displayed. If invert is 1 and the value is not * in the set 1 is returned, otherwise 0 is returned and an error is * displayed. */ static int value_in_set_display_error( const LargestIntegralType value, const CheckIntegerSet * const check_integer_set, const int invert) { int succeeded = invert; assert_non_null(check_integer_set); { const LargestIntegralType * const set = check_integer_set->set; const size_t size_of_set = check_integer_set->size_of_set; size_t i; for (i = 0; i < size_of_set; i++) { if (set[i] == value) { /* If invert = 0 and item is found, succeeded = 1. */ /* If invert = 1 and item is found, succeeded = 0. */ succeeded = !succeeded; break; } } if (succeeded) { return 1; } cm_print_error(LargestIntegralTypePrintfFormatDecimal " is %sin the set (", value, invert ? "" : "not "); for (i = 0; i < size_of_set; i++) { cm_print_error(LargestIntegralTypePrintfFormat ", ", set[i]); } cm_print_error(")\n"); } return 0; } /* * Determine whether a value is within the specified range. If the value is * within the specified range 1 is returned. If the value isn't within the * specified range an error is displayed and 0 is returned. 
*/ static int integer_in_range_display_error( const LargestIntegralType value, const LargestIntegralType range_min, const LargestIntegralType range_max) { if (value >= range_min && value <= range_max) { return 1; } cm_print_error(LargestIntegralTypePrintfFormatDecimal " is not within the range " LargestIntegralTypePrintfFormatDecimal "-" LargestIntegralTypePrintfFormatDecimal "\n", value, range_min, range_max); return 0; } /* * Determine whether a value is within the specified range. If the value * is not within the range 1 is returned. If the value is within the * specified range an error is displayed and zero is returned. */ static int integer_not_in_range_display_error( const LargestIntegralType value, const LargestIntegralType range_min, const LargestIntegralType range_max) { if (value < range_min || value > range_max) { return 1; } cm_print_error(LargestIntegralTypePrintfFormatDecimal " is within the range " LargestIntegralTypePrintfFormatDecimal "-" LargestIntegralTypePrintfFormatDecimal "\n", value, range_min, range_max); return 0; } /* * Determine whether the specified strings are equal. If the strings are equal * 1 is returned. If they're not equal an error is displayed and 0 is * returned. */ static int string_equal_display_error( const char * const left, const char * const right) { if (strcmp(left, right) == 0) { return 1; } cm_print_error("\"%s\" != \"%s\"\n", left, right); return 0; } /* * Determine whether the specified strings are equal. If the strings are not * equal 1 is returned. If they're not equal an error is displayed and 0 is * returned */ static int string_not_equal_display_error( const char * const left, const char * const right) { if (strcmp(left, right) != 0) { return 1; } cm_print_error("\"%s\" == \"%s\"\n", left, right); return 0; } /* * Determine whether the specified areas of memory are equal. If they're equal * 1 is returned otherwise an error is displayed and 0 is returned. */ static int memory_equal_display_error(const char* const a, const char* const b, const size_t size) { size_t differences = 0; size_t i; for (i = 0; i < size; i++) { const char l = a[i]; const char r = b[i]; if (l != r) { if (differences < 16) { cm_print_error("difference at offset %" PRIdS " 0x%02x 0x%02x\n", i, l, r); } differences ++; } } if (differences > 0) { if (differences >= 16) { cm_print_error("...\n"); } cm_print_error("%"PRIdS " bytes of %p and %p differ\n", differences, (void *)a, (void *)b); return 0; } return 1; } /* * Determine whether the specified areas of memory are not equal. If they're * not equal 1 is returned otherwise an error is displayed and 0 is * returned. */ static int memory_not_equal_display_error( const char* const a, const char* const b, const size_t size) { size_t same = 0; size_t i; for (i = 0; i < size; i++) { const char l = a[i]; const char r = b[i]; if (l == r) { same ++; } } if (same == size) { cm_print_error("%"PRIdS "bytes of %p and %p the same\n", same, (void *)a, (void *)b); return 0; } return 1; } /* CheckParameterValue callback to check whether a value is within a set. */ static int check_in_set(const LargestIntegralType value, const LargestIntegralType check_value_data) { return value_in_set_display_error(value, cast_largest_integral_type_to_pointer(CheckIntegerSet*, check_value_data), 0); } /* CheckParameterValue callback to check whether a value isn't within a set. 
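 *
 * (check_in_set()/check_not_in_set() back the public expect_in_set() and
 * expect_not_in_set() macros; e.g. expect_in_set(my_open, mode, allowed_modes)
 * with a LargestIntegralType allowed_modes[] array. Names here are
 * illustrative.)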
*/ static int check_not_in_set(const LargestIntegralType value, const LargestIntegralType check_value_data) { return value_in_set_display_error(value, cast_largest_integral_type_to_pointer(CheckIntegerSet*, check_value_data), 1); } /* Create the callback data for check_in_set() or check_not_in_set() and * register a check event. */ static void expect_set( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType values[], const size_t number_of_values, const CheckParameterValue check_function, const int count) { CheckIntegerSet * const check_integer_set = (CheckIntegerSet*)malloc(sizeof(*check_integer_set) + (sizeof(values[0]) * number_of_values)); LargestIntegralType * const set = (LargestIntegralType*)( check_integer_set + 1); declare_initialize_value_pointer_pointer(check_data, check_integer_set); assert_non_null(values); assert_true(number_of_values); memcpy(set, values, number_of_values * sizeof(values[0])); check_integer_set->set = set; check_integer_set->size_of_set = number_of_values; _expect_check( function, parameter, file, line, check_function, check_data.value, &check_integer_set->event, count); } /* Add an event to check whether a value is in a set. */ void _expect_in_set( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType values[], const size_t number_of_values, const int count) { expect_set(function, parameter, file, line, values, number_of_values, check_in_set, count); } /* Add an event to check whether a value isn't in a set. */ void _expect_not_in_set( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType values[], const size_t number_of_values, const int count) { expect_set(function, parameter, file, line, values, number_of_values, check_not_in_set, count); } /* CheckParameterValue callback to check whether a value is within a range. */ static int check_in_range(const LargestIntegralType value, const LargestIntegralType check_value_data) { CheckIntegerRange * const check_integer_range = cast_largest_integral_type_to_pointer(CheckIntegerRange*, check_value_data); assert_non_null(check_integer_range); return integer_in_range_display_error(value, check_integer_range->minimum, check_integer_range->maximum); } /* CheckParameterValue callback to check whether a value is not within a range. */ static int check_not_in_range(const LargestIntegralType value, const LargestIntegralType check_value_data) { CheckIntegerRange * const check_integer_range = cast_largest_integral_type_to_pointer(CheckIntegerRange*, check_value_data); assert_non_null(check_integer_range); return integer_not_in_range_display_error( value, check_integer_range->minimum, check_integer_range->maximum); } /* Create the callback data for check_in_range() or check_not_in_range() and * register a check event. 
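 *
 * From a test this is reached through the public expect_in_range() /
 * expect_not_in_range() macros, e.g. (hypothetical function and parameter
 * names):
 *
 *   expect_in_range(parse_port, port, 1, 65535);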
*/ static void expect_range( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType minimum, const LargestIntegralType maximum, const CheckParameterValue check_function, const int count) { CheckIntegerRange * const check_integer_range = (CheckIntegerRange*)malloc(sizeof(*check_integer_range)); declare_initialize_value_pointer_pointer(check_data, check_integer_range); check_integer_range->minimum = minimum; check_integer_range->maximum = maximum; _expect_check(function, parameter, file, line, check_function, check_data.value, &check_integer_range->event, count); } /* Add an event to determine whether a parameter is within a range. */ void _expect_in_range( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType minimum, const LargestIntegralType maximum, const int count) { expect_range(function, parameter, file, line, minimum, maximum, check_in_range, count); } /* Add an event to determine whether a parameter is not within a range. */ void _expect_not_in_range( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType minimum, const LargestIntegralType maximum, const int count) { expect_range(function, parameter, file, line, minimum, maximum, check_not_in_range, count); } /* CheckParameterValue callback to check whether a value is equal to an * expected value. */ static int check_value(const LargestIntegralType value, const LargestIntegralType check_value_data) { return values_equal_display_error(value, check_value_data); } /* Add an event to check a parameter equals an expected value. */ void _expect_value( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType value, const int count) { _expect_check(function, parameter, file, line, check_value, value, NULL, count); } /* CheckParameterValue callback to check whether a value is not equal to an * expected value. */ static int check_not_value(const LargestIntegralType value, const LargestIntegralType check_value_data) { return values_not_equal_display_error(value, check_value_data); } /* Add an event to check a parameter is not equal to an expected value. */ void _expect_not_value( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType value, const int count) { _expect_check(function, parameter, file, line, check_not_value, value, NULL, count); } /* CheckParameterValue callback to check whether a parameter equals a string. */ static int check_string(const LargestIntegralType value, const LargestIntegralType check_value_data) { return string_equal_display_error( cast_largest_integral_type_to_pointer(char*, value), cast_largest_integral_type_to_pointer(char*, check_value_data)); } /* Add an event to check whether a parameter is equal to a string. */ void _expect_string( const char* const function, const char* const parameter, const char* const file, const int line, const char* string, const int count) { declare_initialize_value_pointer_pointer(string_pointer, discard_const(string)); _expect_check(function, parameter, file, line, check_string, string_pointer.value, NULL, count); } /* CheckParameterValue callback to check whether a parameter is not equals to * a string. 
*/ static int check_not_string(const LargestIntegralType value, const LargestIntegralType check_value_data) { return string_not_equal_display_error( cast_largest_integral_type_to_pointer(char*, value), cast_largest_integral_type_to_pointer(char*, check_value_data)); } /* Add an event to check whether a parameter is not equal to a string. */ void _expect_not_string( const char* const function, const char* const parameter, const char* const file, const int line, const char* string, const int count) { declare_initialize_value_pointer_pointer(string_pointer, discard_const(string)); _expect_check(function, parameter, file, line, check_not_string, string_pointer.value, NULL, count); } /* CheckParameterValue callback to check whether a parameter equals an area of * memory. */ static int check_memory(const LargestIntegralType value, const LargestIntegralType check_value_data) { CheckMemoryData * const check = cast_largest_integral_type_to_pointer( CheckMemoryData*, check_value_data); assert_non_null(check); return memory_equal_display_error( cast_largest_integral_type_to_pointer(const char*, value), (const char*)check->memory, check->size); } /* Create the callback data for check_memory() or check_not_memory() and * register a check event. */ static void expect_memory_setup( const char* const function, const char* const parameter, const char* const file, const int line, const void * const memory, const size_t size, const CheckParameterValue check_function, const int count) { CheckMemoryData * const check_data = (CheckMemoryData*)malloc(sizeof(*check_data) + size); void * const mem = (void*)(check_data + 1); declare_initialize_value_pointer_pointer(check_data_pointer, check_data); assert_non_null(memory); assert_true(size); memcpy(mem, memory, size); check_data->memory = mem; check_data->size = size; _expect_check(function, parameter, file, line, check_function, check_data_pointer.value, &check_data->event, count); } /* Add an event to check whether a parameter matches an area of memory. */ void _expect_memory( const char* const function, const char* const parameter, const char* const file, const int line, const void* const memory, const size_t size, const int count) { expect_memory_setup(function, parameter, file, line, memory, size, check_memory, count); } /* CheckParameterValue callback to check whether a parameter is not equal to * an area of memory. */ static int check_not_memory(const LargestIntegralType value, const LargestIntegralType check_value_data) { CheckMemoryData * const check = cast_largest_integral_type_to_pointer( CheckMemoryData*, check_value_data); assert_non_null(check); return memory_not_equal_display_error( cast_largest_integral_type_to_pointer(const char*, value), (const char*)check->memory, check->size); } /* Add an event to check whether a parameter doesn't match an area of memory. */ void _expect_not_memory( const char* const function, const char* const parameter, const char* const file, const int line, const void* const memory, const size_t size, const int count) { expect_memory_setup(function, parameter, file, line, memory, size, check_not_memory, count); } /* CheckParameterValue callback that always returns 1. */ static int check_any(const LargestIntegralType value, const LargestIntegralType check_value_data) { (void)value; (void)check_value_data; return 1; } /* Add an event to allow any value for a parameter. 
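 *
 * A test uses this through the public expect_any() macro when it does not
 * care which value is passed, e.g. (chef_cook/order are the hypothetical
 * names used in the documentation examples):
 *
 *   expect_any(chef_cook, order);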
*/ void _expect_any( const char* const function, const char* const parameter, const char* const file, const int line, const int count) { _expect_check(function, parameter, file, line, check_any, 0, NULL, count); } void _check_expected( const char * const function_name, const char * const parameter_name, const char* file, const int line, const LargestIntegralType value) { void *result = NULL; const char* symbols[] = {function_name, parameter_name}; const int rc = get_symbol_value(&global_function_parameter_map_head, symbols, 2, &result); if (rc) { CheckParameterEvent * const check = (CheckParameterEvent*)result; int check_succeeded; global_last_parameter_location = check->location; check_succeeded = check->check_value(value, check->check_value_data); if (rc == 1) { free(check); } if (!check_succeeded) { cm_print_error(SOURCE_LOCATION_FORMAT ": error: Check of parameter %s, function %s failed\n" SOURCE_LOCATION_FORMAT ": note: Expected parameter declared here\n", file, line, parameter_name, function_name, global_last_parameter_location.file, global_last_parameter_location.line); _fail(file, line); } } else { cm_print_error(SOURCE_LOCATION_FORMAT ": error: Could not get value " "to check parameter %s of function %s\n", file, line, parameter_name, function_name); if (source_location_is_set(&global_last_parameter_location)) { cm_print_error(SOURCE_LOCATION_FORMAT ": note: Previously declared parameter value was declared here\n", global_last_parameter_location.file, global_last_parameter_location.line); } else { cm_print_error("There were no previously declared parameter values " "for this test.\n"); } exit_test(1); } } /* Replacement for assert. */ void mock_assert(const int result, const char* const expression, const char* const file, const int line) { if (!result) { if (global_expecting_assert) { global_last_failed_assert = expression; longjmp(global_expect_assert_env, result); } else { cm_print_error("ASSERT: %s\n", expression); _fail(file, line); } } } void _assert_true(const LargestIntegralType result, const char * const expression, const char * const file, const int line) { if (!result) { cm_print_error("%s\n", expression); _fail(file, line); } } void _assert_return_code(const LargestIntegralType result, size_t rlen, const LargestIntegralType error, const char * const expression, const char * const file, const int line) { LargestIntegralType valmax; switch (rlen) { case 1: valmax = 255; break; case 2: valmax = 32767; break; case 4: valmax = 2147483647; break; case 8: default: if (rlen > sizeof(valmax)) { valmax = 2147483647; } else { valmax = 9223372036854775807L; } break; } if (result > valmax - 1) { if (error > 0) { cm_print_error("%s < 0, errno(" LargestIntegralTypePrintfFormatDecimal "): %s\n", expression, error, strerror((int)error)); } else { cm_print_error("%s < 0\n", expression); } _fail(file, line); } } void _assert_int_equal( const LargestIntegralType a, const LargestIntegralType b, const char * const file, const int line) { if (!values_equal_display_error(a, b)) { _fail(file, line); } } void _assert_int_not_equal( const LargestIntegralType a, const LargestIntegralType b, const char * const file, const int line) { if (!values_not_equal_display_error(a, b)) { _fail(file, line); } } void _assert_string_equal(const char * const a, const char * const b, const char * const file, const int line) { if (!string_equal_display_error(a, b)) { _fail(file, line); } } void _assert_string_not_equal(const char * const a, const char * const b, const char *file, const int line) { if 
(!string_not_equal_display_error(a, b)) { _fail(file, line); } } void _assert_memory_equal(const void * const a, const void * const b, const size_t size, const char* const file, const int line) { if (!memory_equal_display_error((const char*)a, (const char*)b, size)) { _fail(file, line); } } void _assert_memory_not_equal(const void * const a, const void * const b, const size_t size, const char* const file, const int line) { if (!memory_not_equal_display_error((const char*)a, (const char*)b, size)) { _fail(file, line); } } void _assert_in_range( const LargestIntegralType value, const LargestIntegralType minimum, const LargestIntegralType maximum, const char* const file, const int line) { if (!integer_in_range_display_error(value, minimum, maximum)) { _fail(file, line); } } void _assert_not_in_range( const LargestIntegralType value, const LargestIntegralType minimum, const LargestIntegralType maximum, const char* const file, const int line) { if (!integer_not_in_range_display_error(value, minimum, maximum)) { _fail(file, line); } } void _assert_in_set(const LargestIntegralType value, const LargestIntegralType values[], const size_t number_of_values, const char* const file, const int line) { CheckIntegerSet check_integer_set; check_integer_set.set = values; check_integer_set.size_of_set = number_of_values; if (!value_in_set_display_error(value, &check_integer_set, 0)) { _fail(file, line); } } void _assert_not_in_set(const LargestIntegralType value, const LargestIntegralType values[], const size_t number_of_values, const char* const file, const int line) { CheckIntegerSet check_integer_set; check_integer_set.set = values; check_integer_set.size_of_set = number_of_values; if (!value_in_set_display_error(value, &check_integer_set, 1)) { _fail(file, line); } } /* Get the list of allocated blocks. */ static ListNode* get_allocated_blocks_list(void) { /* If it initialized, initialize the list of allocated blocks. */ if (!global_allocated_blocks.value) { list_initialize(&global_allocated_blocks); global_allocated_blocks.value = (void*)1; } return &global_allocated_blocks; } static void *libc_malloc(size_t size) { #undef malloc return malloc(size); #define malloc test_malloc } static void libc_free(void *ptr) { #undef free free(ptr); #define free test_free } static void *libc_realloc(void *ptr, size_t size) { #undef realloc return realloc(ptr, size); #define realloc test_realloc } static void vcm_print_error(const char* const format, va_list args) CMOCKA_PRINTF_ATTRIBUTE(1, 0); /* It's important to use the libc malloc and free here otherwise * the automatic free of leaked blocks can reap the error messages */ static void vcm_print_error(const char* const format, va_list args) { char buffer[1024]; size_t msg_len = 0; va_list ap; int len; va_copy(ap, args); len = vsnprintf(buffer, sizeof(buffer), format, args); if (len < 0) { /* TODO */ goto end; } if (cm_error_message == NULL) { /* CREATE MESSAGE */ cm_error_message = libc_malloc(len + 1); if (cm_error_message == NULL) { /* TODO */ goto end; } } else { /* APPEND MESSAGE */ char *tmp; msg_len = strlen(cm_error_message); tmp = libc_realloc(cm_error_message, msg_len + len + 1); if (tmp == NULL) { goto end; } cm_error_message = tmp; } if (((size_t)len) < sizeof(buffer)) { /* Use len + 1 to also copy '\0' */ memcpy(cm_error_message + msg_len, buffer, len + 1); } else { vsnprintf(cm_error_message + msg_len, len, format, ap); } end: va_end(ap); } static void vcm_free_error(char *err_msg) { libc_free(err_msg); } /* Use the real malloc in this function. 
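 *
 * The raw block obtained from malloc() below is laid out as: alignment
 * padding, struct MallocBlockInfoData, MALLOC_GUARD_SIZE guard bytes, the
 * user region of `size` bytes, and a trailing run of MALLOC_GUARD_SIZE
 * guard bytes.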
*/ #undef malloc void* _test_malloc(const size_t size, const char* file, const int line) { char *ptr = NULL; MallocBlockInfo block_info; ListNode * const block_list = get_allocated_blocks_list(); size_t allocate_size; char *block = NULL; allocate_size = size + (MALLOC_GUARD_SIZE * 2) + sizeof(struct MallocBlockInfoData) + MALLOC_ALIGNMENT; assert_true(allocate_size > size); block = (char *)malloc(allocate_size); assert_non_null(block); /* Calculate the returned address. */ ptr = (char*)(((size_t)block + MALLOC_GUARD_SIZE + sizeof(struct MallocBlockInfoData) + MALLOC_ALIGNMENT) & ~(MALLOC_ALIGNMENT - 1)); /* Initialize the guard blocks. */ memset(ptr - MALLOC_GUARD_SIZE, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); memset(ptr + size, MALLOC_GUARD_PATTERN, MALLOC_GUARD_SIZE); memset(ptr, MALLOC_ALLOC_PATTERN, size); block_info.ptr = ptr - (MALLOC_GUARD_SIZE + sizeof(struct MallocBlockInfoData)); set_source_location(&block_info.data->location, file, line); block_info.data->allocated_size = allocate_size; block_info.data->size = size; block_info.data->block = block; block_info.data->node.value = block_info.ptr; list_add(block_list, &block_info.data->node); return ptr; } #define malloc test_malloc void* _test_calloc(const size_t number_of_elements, const size_t size, const char* file, const int line) { void* const ptr = _test_malloc(number_of_elements * size, file, line); if (ptr) { memset(ptr, 0, number_of_elements * size); } return ptr; } /* Use the real free in this function. */ #undef free void _test_free(void* const ptr, const char* file, const int line) { unsigned int i; char *block = discard_const_p(char, ptr); MallocBlockInfo block_info; if (ptr == NULL) { return; } _assert_true(cast_ptr_to_largest_integral_type(ptr), "ptr", file, line); block_info.ptr = block - (MALLOC_GUARD_SIZE + sizeof(struct MallocBlockInfoData)); /* Check the guard blocks. */ { char *guards[2] = {block - MALLOC_GUARD_SIZE, block + block_info.data->size}; for (i = 0; i < ARRAY_SIZE(guards); i++) { unsigned int j; char * const guard = guards[i]; for (j = 0; j < MALLOC_GUARD_SIZE; j++) { const char diff = guard[j] - MALLOC_GUARD_PATTERN; if (diff) { cm_print_error(SOURCE_LOCATION_FORMAT ": error: Guard block of %p size=%lu is corrupt\n" SOURCE_LOCATION_FORMAT ": note: allocated here at %p\n", file, line, ptr, (unsigned long)block_info.data->size, block_info.data->location.file, block_info.data->location.line, (void *)&guard[j]); _fail(file, line); } } } } list_remove(&block_info.data->node, NULL, NULL); block = discard_const_p(char, block_info.data->block); memset(block, MALLOC_FREE_PATTERN, block_info.data->allocated_size); free(block); } #define free test_free #undef realloc void *_test_realloc(void *ptr, const size_t size, const char *file, const int line) { MallocBlockInfo block_info; char *block = ptr; size_t block_size = size; void *new_block; if (ptr == NULL) { return _test_malloc(size, file, line); } if (size == 0) { _test_free(ptr, file, line); return NULL; } block_info.ptr = block - (MALLOC_GUARD_SIZE + sizeof(struct MallocBlockInfoData)); new_block = _test_malloc(size, file, line); if (new_block == NULL) { return NULL; } if (block_info.data->size < size) { block_size = block_info.data->size; } memcpy(new_block, ptr, block_size); /* Free previous memory */ _test_free(ptr, file, line); return new_block; } #define realloc test_realloc /* Crudely checkpoint the current heap state. 
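 *
 * The checkpoint is simply the current tail node of the allocated-blocks
 * list, so every node linked after it corresponds to a block allocated
 * after the checkpoint was taken.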
*/ static const ListNode* check_point_allocated_blocks(void) { return get_allocated_blocks_list()->prev; } /* Display the blocks allocated after the specified check point. This * function returns the number of blocks displayed. */ static size_t display_allocated_blocks(const ListNode * const check_point) { const ListNode * const head = get_allocated_blocks_list(); const ListNode *node; size_t allocated_blocks = 0; assert_non_null(check_point); assert_non_null(check_point->next); for (node = check_point->next; node != head; node = node->next) { const MallocBlockInfo block_info = { .ptr = discard_const(node->value), }; assert_non_null(block_info.ptr); if (allocated_blocks == 0) { cm_print_error("Blocks allocated...\n"); } cm_print_error(SOURCE_LOCATION_FORMAT ": note: block %p allocated here\n", block_info.data->location.file, block_info.data->location.line, block_info.data->block); allocated_blocks++; } return allocated_blocks; } /* Free all blocks allocated after the specified check point. */ static void free_allocated_blocks(const ListNode * const check_point) { const ListNode * const head = get_allocated_blocks_list(); const ListNode *node; assert_non_null(check_point); node = check_point->next; assert_non_null(node); while (node != head) { const MallocBlockInfo block_info = { .ptr = discard_const(node->value), }; node = node->next; free(discard_const_p(char, block_info.data) + sizeof(struct MallocBlockInfoData) + MALLOC_GUARD_SIZE); } } /* Fail if any any blocks are allocated after the specified check point. */ static void fail_if_blocks_allocated(const ListNode * const check_point, const char * const test_name) { const size_t allocated_blocks = display_allocated_blocks(check_point); if (allocated_blocks > 0) { free_allocated_blocks(check_point); cm_print_error("ERROR: %s leaked %zu block(s)\n", test_name, allocated_blocks); exit_test(1); } } void _fail(const char * const file, const int line) { enum cm_message_output output = cm_get_output(); switch(output) { case CM_OUTPUT_STDOUT: cm_print_error("[ LINE ] --- " SOURCE_LOCATION_FORMAT ": error: Failure!", file, line); break; default: cm_print_error(SOURCE_LOCATION_FORMAT ": error: Failure!", file, line); break; } exit_test(1); } #ifndef _WIN32 static void exception_handler(int sig) { const char *sig_strerror = ""; #ifdef HAVE_STRSIGNAL sig_strerror = strsignal(sig); #endif cm_print_error("Test failed with exception: %s(%d)", sig_strerror, sig); exit_test(1); } #else /* _WIN32 */ static LONG WINAPI exception_filter(EXCEPTION_POINTERS *exception_pointers) { EXCEPTION_RECORD * const exception_record = exception_pointers->ExceptionRecord; const DWORD code = exception_record->ExceptionCode; unsigned int i; for (i = 0; i < ARRAY_SIZE(exception_codes); i++) { const ExceptionCodeInfo * const code_info = &exception_codes[i]; if (code == code_info->code) { static int shown_debug_message = 0; fflush(stdout); cm_print_error("%s occurred at %p.\n", code_info->description, exception_record->ExceptionAddress); if (!shown_debug_message) { cm_print_error( "\n" "To debug in Visual Studio...\n" "1. Select menu item File->Open Project\n" "2. Change 'Files of type' to 'Executable Files'\n" "3. Open this executable.\n" "4. 
Select menu item Debug->Start\n" "\n" "Alternatively, set the environment variable \n" "UNIT_TESTING_DEBUG to 1 and rebuild this executable, \n" "then click 'Debug' in the popup dialog box.\n" "\n"); shown_debug_message = 1; } exit_test(0); return EXCEPTION_EXECUTE_HANDLER; } } return EXCEPTION_CONTINUE_SEARCH; } #endif /* !_WIN32 */ void cm_print_error(const char * const format, ...) { va_list args; va_start(args, format); if (cm_error_message_enabled) { vcm_print_error(format, args); } else { vprint_error(format, args); } va_end(args); } /* Standard output and error print methods. */ void vprint_message(const char* const format, va_list args) { char buffer[1024]; vsnprintf(buffer, sizeof(buffer), format, args); printf("%s", buffer); fflush(stdout); #ifdef _WIN32 OutputDebugString(buffer); #endif /* _WIN32 */ } void vprint_error(const char* const format, va_list args) { char buffer[1024]; vsnprintf(buffer, sizeof(buffer), format, args); fprintf(stderr, "%s", buffer); fflush(stderr); #ifdef _WIN32 OutputDebugString(buffer); #endif /* _WIN32 */ } void print_message(const char* const format, ...) { va_list args; va_start(args, format); vprint_message(format, args); va_end(args); } void print_error(const char* const format, ...) { va_list args; va_start(args, format); vprint_error(format, args); va_end(args); } /* New formatter */ static enum cm_message_output cm_get_output(void) { enum cm_message_output output = global_msg_output; char *env; env = getenv("CMOCKA_MESSAGE_OUTPUT"); if (env != NULL) { if (strcasecmp(env, "STDOUT") == 0) { output = CM_OUTPUT_STDOUT; } else if (strcasecmp(env, "SUBUNIT") == 0) { output = CM_OUTPUT_SUBUNIT; } else if (strcasecmp(env, "TAP") == 0) { output = CM_OUTPUT_TAP; } else if (strcasecmp(env, "XML") == 0) { output = CM_OUTPUT_XML; } } return output; } enum cm_printf_type { PRINTF_TEST_START, PRINTF_TEST_SUCCESS, PRINTF_TEST_FAILURE, PRINTF_TEST_ERROR, PRINTF_TEST_SKIPPED, }; static int xml_printed; static int file_append; static void cmprintf_group_finish_xml(const char *group_name, size_t total_executed, size_t total_failed, size_t total_errors, size_t total_skipped, double total_runtime, struct CMUnitTestState *cm_tests) { FILE *fp = stdout; int file_opened = 0; int multiple_files = 0; char *env; size_t i; env = getenv("CMOCKA_XML_FILE"); if (env != NULL) { char buf[1024]; int rc; snprintf(buf, sizeof(buf), "%s", env); rc = c_strreplace(buf, sizeof(buf), "%g", group_name, &multiple_files); if (rc < 0) { snprintf(buf, sizeof(buf), "%s", env); } fp = fopen(buf, "r"); if (fp == NULL) { fp = fopen(buf, "w"); if (fp != NULL) { file_append = 1; file_opened = 1; } else { fp = stderr; } } else { fclose(fp); if (file_append) { fp = fopen(buf, "a"); if (fp != NULL) { file_opened = 1; xml_printed = 1; } else { fp = stderr; } } else { fp = stderr; } } } if (!xml_printed || (file_opened && !file_append)) { fprintf(fp, "\n"); if (!file_opened) { xml_printed = 1; } } fprintf(fp, "\n"); fprintf(fp, " \n", group_name, total_runtime, /* seconds */ (unsigned)total_executed, (unsigned)total_failed, (unsigned)total_errors, (unsigned)total_skipped); for (i = 0; i < total_executed; i++) { struct CMUnitTestState *cmtest = &cm_tests[i]; fprintf(fp, " \n", cmtest->test->name, cmtest->runtime); switch (cmtest->status) { case CM_TEST_ERROR: case CM_TEST_FAILED: if (cmtest->error_message != NULL) { fprintf(fp, " \n", cmtest->error_message); } else { fprintf(fp, " \n"); } break; case CM_TEST_SKIPPED: fprintf(fp, " \n"); break; case CM_TEST_PASSED: case CM_TEST_NOT_STARTED: break; } 
fprintf(fp, " \n"); } fprintf(fp, " \n"); fprintf(fp, "\n"); if (file_opened) { fclose(fp); } } static void cmprintf_group_start_standard(const size_t num_tests) { print_message("[==========] Running %u test(s).\n", (unsigned)num_tests); } static void cmprintf_group_finish_standard(size_t total_executed, size_t total_passed, size_t total_failed, size_t total_errors, size_t total_skipped, struct CMUnitTestState *cm_tests) { size_t i; print_message("[==========] %u test(s) run.\n", (unsigned)total_executed); print_error("[ PASSED ] %u test(s).\n", (unsigned)(total_passed)); if (total_skipped) { print_error("[ SKIPPED ] %"PRIdS " test(s), listed below:\n", total_skipped); for (i = 0; i < total_executed; i++) { struct CMUnitTestState *cmtest = &cm_tests[i]; if (cmtest->status == CM_TEST_SKIPPED) { print_error("[ SKIPPED ] %s\n", cmtest->test->name); } } print_error("\n %u SKIPPED TEST(S)\n", (unsigned)(total_skipped)); } if (total_failed) { print_error("[ FAILED ] %"PRIdS " test(s), listed below:\n", total_failed); for (i = 0; i < total_executed; i++) { struct CMUnitTestState *cmtest = &cm_tests[i]; if (cmtest->status == CM_TEST_FAILED) { print_error("[ FAILED ] %s\n", cmtest->test->name); } } print_error("\n %u FAILED TEST(S)\n", (unsigned)(total_failed + total_errors)); } } static void cmprintf_standard(enum cm_printf_type type, const char *test_name, const char *error_message) { switch (type) { case PRINTF_TEST_START: print_message("[ RUN ] %s\n", test_name); break; case PRINTF_TEST_SUCCESS: print_message("[ OK ] %s\n", test_name); break; case PRINTF_TEST_FAILURE: if (error_message != NULL) { print_error("[ ERROR ] --- %s\n", error_message); } print_message("[ FAILED ] %s\n", test_name); break; case PRINTF_TEST_SKIPPED: print_message("[ SKIPPED ] %s\n", test_name); break; case PRINTF_TEST_ERROR: if (error_message != NULL) { print_error("%s\n", error_message); } print_error("[ ERROR ] %s\n", test_name); break; } } static void cmprintf_group_start_tap(const size_t num_tests) { print_message("1..%u\n", (unsigned)num_tests); } static void cmprintf_group_finish_tap(const char *group_name, size_t total_executed, size_t total_passed, size_t total_skipped) { const char *status = "not ok"; if (total_passed + total_skipped == total_executed) { status = "ok"; } print_message("# %s - %s\n", status, group_name); } static void cmprintf_tap(enum cm_printf_type type, uint32_t test_number, const char *test_name, const char *error_message) { switch (type) { case PRINTF_TEST_START: break; case PRINTF_TEST_SUCCESS: print_message("ok %u - %s\n", (unsigned)test_number, test_name); break; case PRINTF_TEST_FAILURE: print_message("not ok %u - %s\n", (unsigned)test_number, test_name); if (error_message != NULL) { char *msg; char *p; msg = strdup(error_message); if (msg == NULL) { return; } p = msg; while (p[0] != '\0') { char *q = p; p = strchr(q, '\n'); if (p != NULL) { p[0] = '\0'; } print_message("# %s\n", q); if (p == NULL) { break; } p++; } libc_free(msg); } break; case PRINTF_TEST_SKIPPED: print_message("not ok %u # SKIP %s\n", (unsigned)test_number, test_name); break; case PRINTF_TEST_ERROR: print_message("not ok %u - %s %s\n", (unsigned)test_number, test_name, error_message); break; } } static void cmprintf_subunit(enum cm_printf_type type, const char *test_name, const char *error_message) { switch (type) { case PRINTF_TEST_START: print_message("test: %s\n", test_name); break; case PRINTF_TEST_SUCCESS: print_message("success: %s\n", test_name); break; case PRINTF_TEST_FAILURE: print_message("failure: %s", 
test_name); if (error_message != NULL) { print_message(" [\n%s\n]\n", error_message); } break; case PRINTF_TEST_SKIPPED: print_message("skip: %s\n", test_name); break; case PRINTF_TEST_ERROR: print_message("error: %s [ %s ]\n", test_name, error_message); break; } } static void cmprintf_group_start(const size_t num_tests) { enum cm_message_output output; output = cm_get_output(); switch (output) { case CM_OUTPUT_STDOUT: cmprintf_group_start_standard(num_tests); break; case CM_OUTPUT_SUBUNIT: break; case CM_OUTPUT_TAP: cmprintf_group_start_tap(num_tests); break; case CM_OUTPUT_XML: break; } } static void cmprintf_group_finish(const char *group_name, size_t total_executed, size_t total_passed, size_t total_failed, size_t total_errors, size_t total_skipped, double total_runtime, struct CMUnitTestState *cm_tests) { enum cm_message_output output; output = cm_get_output(); switch (output) { case CM_OUTPUT_STDOUT: cmprintf_group_finish_standard(total_executed, total_passed, total_failed, total_errors, total_skipped, cm_tests); break; case CM_OUTPUT_SUBUNIT: break; case CM_OUTPUT_TAP: cmprintf_group_finish_tap(group_name, total_executed, total_passed, total_skipped); break; case CM_OUTPUT_XML: cmprintf_group_finish_xml(group_name, total_executed, total_failed, total_errors, total_skipped, total_runtime, cm_tests); break; } } static void cmprintf(enum cm_printf_type type, size_t test_number, const char *test_name, const char *error_message) { enum cm_message_output output; output = cm_get_output(); switch (output) { case CM_OUTPUT_STDOUT: cmprintf_standard(type, test_name, error_message); break; case CM_OUTPUT_SUBUNIT: cmprintf_subunit(type, test_name, error_message); break; case CM_OUTPUT_TAP: cmprintf_tap(type, test_number, test_name, error_message); break; case CM_OUTPUT_XML: break; } } void cmocka_set_message_output(enum cm_message_output output) { global_msg_output = output; } void cmocka_set_test_filter(const char *pattern) { global_test_filter_pattern = pattern; } /**************************************************************************** * TIME CALCULATIONS ****************************************************************************/ #ifdef HAVE_STRUCT_TIMESPEC static struct timespec cm_tspecdiff(struct timespec time1, struct timespec time0) { struct timespec ret; int xsec = 0; int sign = 1; if (time0.tv_nsec > time1.tv_nsec) { xsec = (int) ((time0.tv_nsec - time1.tv_nsec) / (1E9 + 1)); time0.tv_nsec -= (long int) (1E9 * xsec); time0.tv_sec += xsec; } if ((time1.tv_nsec - time0.tv_nsec) > 1E9) { xsec = (int) ((time1.tv_nsec - time0.tv_nsec) / 1E9); time0.tv_nsec += (long int) (1E9 * xsec); time0.tv_sec -= xsec; } ret.tv_sec = time1.tv_sec - time0.tv_sec; ret.tv_nsec = time1.tv_nsec - time0.tv_nsec; if (time1.tv_sec < time0.tv_sec) { sign = -1; } ret.tv_sec = ret.tv_sec * sign; return ret; } static double cm_secdiff(struct timespec clock1, struct timespec clock0) { double ret; struct timespec diff; diff = cm_tspecdiff(clock1, clock0); ret = diff.tv_sec; ret += (double) diff.tv_nsec / (double) 1E9; return ret; } #endif /* HAVE_STRUCT_TIMESPEC */ /**************************************************************************** * CMOCKA TEST RUNNER ****************************************************************************/ static int cmocka_run_one_test_or_fixture(const char *function_name, CMUnitTestFunction test_func, CMFixtureFunction setup_func, CMFixtureFunction teardown_func, void ** const volatile state, const void *const heap_check_point) { const ListNode * const volatile check_point = 
(const ListNode*) (heap_check_point != NULL ? heap_check_point : check_point_allocated_blocks()); int handle_exceptions = 1; void *current_state = NULL; int rc = 0; /* FIXME check only one test or fixture is set */ /* Detect if we should handle exceptions */ #ifdef _WIN32 handle_exceptions = !IsDebuggerPresent(); #endif /* _WIN32 */ #ifdef UNIT_TESTING_DEBUG handle_exceptions = 0; #endif /* UNIT_TESTING_DEBUG */ if (handle_exceptions) { #ifndef _WIN32 unsigned int i; for (i = 0; i < ARRAY_SIZE(exception_signals); i++) { default_signal_functions[i] = signal( exception_signals[i], exception_handler); } #else /* _WIN32 */ previous_exception_filter = SetUnhandledExceptionFilter( exception_filter); #endif /* !_WIN32 */ } /* Init the test structure */ initialize_testing(function_name); global_running_test = 1; if (cm_setjmp(global_run_test_env) == 0) { if (test_func != NULL) { test_func(state != NULL ? state : ¤t_state); fail_if_blocks_allocated(check_point, function_name); rc = 0; } else if (setup_func != NULL) { rc = setup_func(state != NULL ? state : ¤t_state); /* * For setup we can ignore any allocated blocks. We just need to * ensure they're deallocated on tear down. */ } else if (teardown_func != NULL) { rc = teardown_func(state != NULL ? state : ¤t_state); fail_if_blocks_allocated(check_point, function_name); } else { /* ERROR */ } fail_if_leftover_values(function_name); global_running_test = 0; } else { /* TEST FAILED */ global_running_test = 0; rc = -1; } teardown_testing(function_name); if (handle_exceptions) { #ifndef _WIN32 unsigned int i; for (i = 0; i < ARRAY_SIZE(exception_signals); i++) { signal(exception_signals[i], default_signal_functions[i]); } #else /* _WIN32 */ if (previous_exception_filter) { SetUnhandledExceptionFilter(previous_exception_filter); previous_exception_filter = NULL; } #endif /* !_WIN32 */ } return rc; } static int cmocka_run_group_fixture(const char *function_name, CMFixtureFunction setup_func, CMFixtureFunction teardown_func, void **state, const void *const heap_check_point) { int rc; if (setup_func != NULL) { rc = cmocka_run_one_test_or_fixture(function_name, NULL, setup_func, NULL, state, heap_check_point); } else { rc = cmocka_run_one_test_or_fixture(function_name, NULL, NULL, teardown_func, state, heap_check_point); } return rc; } static int cmocka_run_one_tests(struct CMUnitTestState *test_state) { #ifdef HAVE_STRUCT_TIMESPEC struct timespec start = { .tv_sec = 0, .tv_nsec = 0, }; struct timespec finish = { .tv_sec = 0, .tv_nsec = 0, }; #endif int rc = 0; /* Run setup */ if (test_state->test->setup_func != NULL) { /* Setup the memory check point, it will be evaluated on teardown */ test_state->check_point = check_point_allocated_blocks(); rc = cmocka_run_one_test_or_fixture(test_state->test->name, NULL, test_state->test->setup_func, NULL, &test_state->state, test_state->check_point); if (rc != 0) { test_state->status = CM_TEST_ERROR; cm_print_error("Test setup failed"); } } /* Run test */ #ifdef HAVE_STRUCT_TIMESPEC CMOCKA_CLOCK_GETTIME(CLOCK_REALTIME, &start); #endif if (rc == 0) { rc = cmocka_run_one_test_or_fixture(test_state->test->name, test_state->test->test_func, NULL, NULL, &test_state->state, NULL); if (rc == 0) { test_state->status = CM_TEST_PASSED; } else { if (global_skip_test) { test_state->status = CM_TEST_SKIPPED; global_skip_test = 0; /* Do not skip the next test */ } else { test_state->status = CM_TEST_FAILED; } } rc = 0; } test_state->runtime = 0.0; #ifdef HAVE_STRUCT_TIMESPEC CMOCKA_CLOCK_GETTIME(CLOCK_REALTIME, &finish); 
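    /* Runtime is the wall-clock difference between the CLOCK_REALTIME
     * samples taken immediately before and after the test body ran. */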
test_state->runtime = cm_secdiff(finish, start); #endif /* Run teardown */ if (rc == 0 && test_state->test->teardown_func != NULL) { rc = cmocka_run_one_test_or_fixture(test_state->test->name, NULL, NULL, test_state->test->teardown_func, &test_state->state, test_state->check_point); if (rc != 0) { test_state->status = CM_TEST_ERROR; cm_print_error("Test teardown failed"); } } test_state->error_message = cm_error_message; cm_error_message = NULL; return rc; } int _cmocka_run_group_tests(const char *group_name, const struct CMUnitTest * const tests, const size_t num_tests, CMFixtureFunction group_setup, CMFixtureFunction group_teardown) { struct CMUnitTestState *cm_tests; const ListNode *group_check_point = check_point_allocated_blocks(); void *group_state = NULL; size_t total_tests = 0; size_t total_failed = 0; size_t total_passed = 0; size_t total_executed = 0; size_t total_errors = 0; size_t total_skipped = 0; double total_runtime = 0; size_t i; int rc; /* Make sure LargestIntegralType is at least the size of a pointer. */ assert_true(sizeof(LargestIntegralType) >= sizeof(void*)); cm_tests = (struct CMUnitTestState *)libc_malloc(sizeof(struct CMUnitTestState) * num_tests); if (cm_tests == NULL) { return -1; } /* Setup cmocka test array */ for (i = 0; i < num_tests; i++) { if (tests[i].name != NULL && (tests[i].test_func != NULL || tests[i].setup_func != NULL || tests[i].teardown_func != NULL)) { if (global_test_filter_pattern != NULL) { int ok; ok = c_strmatch(tests[i].name, global_test_filter_pattern); if (!ok) { continue; } } cm_tests[total_tests] = (struct CMUnitTestState) { .test = &tests[i], .status = CM_TEST_NOT_STARTED, .state = NULL, }; total_tests++; } } cmprintf_group_start(total_tests); rc = 0; /* Run group setup */ if (group_setup != NULL) { rc = cmocka_run_group_fixture("cmocka_group_setup", group_setup, NULL, &group_state, group_check_point); } if (rc == 0) { /* Execute tests */ for (i = 0; i < total_tests; i++) { struct CMUnitTestState *cmtest = &cm_tests[i]; size_t test_number = i + 1; cmprintf(PRINTF_TEST_START, test_number, cmtest->test->name, NULL); if (group_state != NULL) { cmtest->state = group_state; } else if (cmtest->test->initial_state != NULL) { cmtest->state = cmtest->test->initial_state; } rc = cmocka_run_one_tests(cmtest); total_executed++; total_runtime += cmtest->runtime; if (rc == 0) { switch (cmtest->status) { case CM_TEST_PASSED: cmprintf(PRINTF_TEST_SUCCESS, test_number, cmtest->test->name, cmtest->error_message); total_passed++; break; case CM_TEST_SKIPPED: cmprintf(PRINTF_TEST_SKIPPED, test_number, cmtest->test->name, cmtest->error_message); total_skipped++; break; case CM_TEST_FAILED: cmprintf(PRINTF_TEST_FAILURE, test_number, cmtest->test->name, cmtest->error_message); total_failed++; break; default: cmprintf(PRINTF_TEST_ERROR, test_number, cmtest->test->name, "Internal cmocka error"); total_errors++; break; } } else { char err_msg[2048] = {0}; snprintf(err_msg, sizeof(err_msg), "Could not run test: %s", cmtest->error_message); cmprintf(PRINTF_TEST_ERROR, test_number, cmtest->test->name, err_msg); total_errors++; } } } else { if (cm_error_message != NULL) { print_error("[ ERROR ] --- %s\n", cm_error_message); vcm_free_error(cm_error_message); cm_error_message = NULL; } cmprintf(PRINTF_TEST_ERROR, 0, group_name, "[ FAILED ] GROUP SETUP"); total_errors++; } /* Run group teardown */ if (group_teardown != NULL) { rc = cmocka_run_group_fixture("cmocka_group_teardown", NULL, group_teardown, &group_state, group_check_point); if (rc != 0) { if 
(cm_error_message != NULL) { print_error("[ ERROR ] --- %s\n", cm_error_message); vcm_free_error(cm_error_message); cm_error_message = NULL; } cmprintf(PRINTF_TEST_ERROR, 0, group_name, "[ FAILED ] GROUP TEARDOWN"); } } cmprintf_group_finish(group_name, total_executed, total_passed, total_failed, total_errors, total_skipped, total_runtime, cm_tests); for (i = 0; i < total_tests; i++) { vcm_free_error(discard_const_p(char, cm_tests[i].error_message)); } libc_free(cm_tests); fail_if_blocks_allocated(group_check_point, "cmocka_group_tests"); return total_failed + total_errors; } /**************************************************************************** * DEPRECATED TEST RUNNER ****************************************************************************/ int _run_test( const char * const function_name, const UnitTestFunction Function, void ** const volatile state, const UnitTestFunctionType function_type, const void* const heap_check_point) { const ListNode * const volatile check_point = (const ListNode*) (heap_check_point ? heap_check_point : check_point_allocated_blocks()); void *current_state = NULL; volatile int rc = 1; int handle_exceptions = 1; #ifdef _WIN32 handle_exceptions = !IsDebuggerPresent(); #endif /* _WIN32 */ #ifdef UNIT_TESTING_DEBUG handle_exceptions = 0; #endif /* UNIT_TESTING_DEBUG */ cm_error_message_enabled = 0; if (handle_exceptions) { #ifndef _WIN32 unsigned int i; for (i = 0; i < ARRAY_SIZE(exception_signals); i++) { default_signal_functions[i] = signal( exception_signals[i], exception_handler); } #else /* _WIN32 */ previous_exception_filter = SetUnhandledExceptionFilter( exception_filter); #endif /* !_WIN32 */ } if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) { print_message("[ RUN ] %s\n", function_name); } initialize_testing(function_name); global_running_test = 1; if (cm_setjmp(global_run_test_env) == 0) { Function(state ? state : ¤t_state); fail_if_leftover_values(function_name); /* If this is a setup function then ignore any allocated blocks * only ensure they're deallocated on tear down. */ if (function_type != UNIT_TEST_FUNCTION_TYPE_SETUP) { fail_if_blocks_allocated(check_point, function_name); } global_running_test = 0; if (function_type == UNIT_TEST_FUNCTION_TYPE_TEST) { print_message("[ OK ] %s\n", function_name); } rc = 0; } else { global_running_test = 0; print_message("[ FAILED ] %s\n", function_name); } teardown_testing(function_name); if (handle_exceptions) { #ifndef _WIN32 unsigned int i; for (i = 0; i < ARRAY_SIZE(exception_signals); i++) { signal(exception_signals[i], default_signal_functions[i]); } #else /* _WIN32 */ if (previous_exception_filter) { SetUnhandledExceptionFilter(previous_exception_filter); previous_exception_filter = NULL; } #endif /* !_WIN32 */ } return rc; } int _run_tests(const UnitTest * const tests, const size_t number_of_tests) { /* Whether to execute the next test. */ int run_next_test = 1; /* Whether the previous test failed. */ int previous_test_failed = 0; /* Whether the previous setup failed. */ int previous_setup_failed = 0; /* Check point of the heap state. */ const ListNode * const check_point = check_point_allocated_blocks(); /* Current test being executed. */ size_t current_test = 0; /* Number of tests executed. */ size_t tests_executed = 0; /* Number of failed tests. */ size_t total_failed = 0; /* Number of setup functions. */ size_t setups = 0; /* Number of teardown functions. */ size_t teardowns = 0; size_t i; /* * A stack of test states. 
A state is pushed on the stack * when a test setup occurs and popped on tear down. */ TestState* test_states = (TestState*)malloc(number_of_tests * sizeof(*test_states)); /* The number of test states which should be 0 at the end */ long number_of_test_states = 0; /* Names of the tests that failed. */ const char** failed_names = (const char**)malloc(number_of_tests * sizeof(*failed_names)); void **current_state = NULL; /* Count setup and teardown functions */ for (i = 0; i < number_of_tests; i++) { const UnitTest * const test = &tests[i]; if (test->function_type == UNIT_TEST_FUNCTION_TYPE_SETUP) { setups++; } if (test->function_type == UNIT_TEST_FUNCTION_TYPE_TEARDOWN) { teardowns++; } } print_message("[==========] Running %"PRIdS " test(s).\n", number_of_tests - setups - teardowns); /* Make sure LargestIntegralType is at least the size of a pointer. */ assert_true(sizeof(LargestIntegralType) >= sizeof(void*)); while (current_test < number_of_tests) { const ListNode *test_check_point = NULL; TestState *current_TestState; const UnitTest * const test = &tests[current_test++]; if (!test->function) { continue; } switch (test->function_type) { case UNIT_TEST_FUNCTION_TYPE_TEST: if (! previous_setup_failed) { run_next_test = 1; } break; case UNIT_TEST_FUNCTION_TYPE_SETUP: { /* Checkpoint the heap before the setup. */ current_TestState = &test_states[number_of_test_states++]; current_TestState->check_point = check_point_allocated_blocks(); test_check_point = current_TestState->check_point; current_state = ¤t_TestState->state; *current_state = NULL; run_next_test = 1; break; } case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: /* Check the heap based on the last setup checkpoint. */ assert_true(number_of_test_states); current_TestState = &test_states[--number_of_test_states]; test_check_point = current_TestState->check_point; current_state = ¤t_TestState->state; break; default: print_error("Invalid unit test function type %d\n", test->function_type); exit_test(1); break; } if (run_next_test) { int failed = _run_test(test->name, test->function, current_state, test->function_type, test_check_point); if (failed) { failed_names[total_failed] = test->name; } switch (test->function_type) { case UNIT_TEST_FUNCTION_TYPE_TEST: previous_test_failed = failed; total_failed += failed; tests_executed ++; break; case UNIT_TEST_FUNCTION_TYPE_SETUP: if (failed) { total_failed ++; tests_executed ++; /* Skip forward until the next test or setup function. */ run_next_test = 0; previous_setup_failed = 1; } previous_test_failed = 0; break; case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: /* If this test failed. 
*/ if (failed && !previous_test_failed) { total_failed ++; } break; default: #ifndef _HPUX assert_null("BUG: shouldn't be here!"); #endif break; } } } print_message("[==========] %"PRIdS " test(s) run.\n", tests_executed); print_error("[ PASSED ] %"PRIdS " test(s).\n", tests_executed - total_failed); if (total_failed > 0) { print_error("[ FAILED ] %"PRIdS " test(s), listed below:\n", total_failed); for (i = 0; i < total_failed; i++) { print_error("[ FAILED ] %s\n", failed_names[i]); } } else { print_error("\n %"PRIdS " FAILED TEST(S)\n", total_failed); } if (number_of_test_states != 0) { print_error("[ ERROR ] Mismatched number of setup %"PRIdS " and " "teardown %"PRIdS " functions\n", setups, teardowns); total_failed = (size_t)-1; } free(test_states); free((void*)failed_names); fail_if_blocks_allocated(check_point, "run_tests"); return (int)total_failed; } int _run_group_tests(const UnitTest * const tests, const size_t number_of_tests) { UnitTestFunction setup = NULL; const char *setup_name; size_t num_setups = 0; UnitTestFunction teardown = NULL; const char *teardown_name = NULL; size_t num_teardowns = 0; size_t current_test = 0; size_t i; /* Number of tests executed. */ size_t tests_executed = 0; /* Number of failed tests. */ size_t total_failed = 0; /* Check point of the heap state. */ const ListNode * const check_point = check_point_allocated_blocks(); const char **failed_names = NULL; void **current_state = NULL; TestState group_state = { .check_point = NULL, }; if (number_of_tests == 0) { return -1; } failed_names = (const char **)malloc(number_of_tests * sizeof(*failed_names)); if (failed_names == NULL) { return -2; } /* Find setup and teardown function */ for (i = 0; i < number_of_tests; i++) { const UnitTest * const test = &tests[i]; if (test->function_type == UNIT_TEST_FUNCTION_TYPE_GROUP_SETUP) { if (setup == NULL) { setup = test->function; setup_name = test->name; num_setups = 1; } else { print_error("[ ERROR ] More than one group setup function detected\n"); exit_test(1); } } if (test->function_type == UNIT_TEST_FUNCTION_TYPE_GROUP_TEARDOWN) { if (teardown == NULL) { teardown = test->function; teardown_name = test->name; num_teardowns = 1; } else { print_error("[ ERROR ] More than one group teardown function detected\n"); exit_test(1); } } } print_message("[==========] Running %"PRIdS " test(s).\n", number_of_tests - num_setups - num_teardowns); if (setup != NULL) { int failed; group_state.check_point = check_point_allocated_blocks(); current_state = &group_state.state; *current_state = NULL; failed = _run_test(setup_name, setup, current_state, UNIT_TEST_FUNCTION_TYPE_SETUP, group_state.check_point); if (failed) { failed_names[total_failed] = setup_name; } total_failed += failed; tests_executed++; } while (current_test < number_of_tests) { int run_test = 0; const UnitTest * const test = &tests[current_test++]; if (test->function == NULL) { continue; } switch (test->function_type) { case UNIT_TEST_FUNCTION_TYPE_TEST: run_test = 1; break; case UNIT_TEST_FUNCTION_TYPE_SETUP: case UNIT_TEST_FUNCTION_TYPE_TEARDOWN: case UNIT_TEST_FUNCTION_TYPE_GROUP_SETUP: case UNIT_TEST_FUNCTION_TYPE_GROUP_TEARDOWN: break; default: print_error("Invalid unit test function type %d\n", test->function_type); break; } if (run_test) { int failed; failed = _run_test(test->name, test->function, current_state, test->function_type, NULL); if (failed) { failed_names[total_failed] = test->name; } total_failed += failed; tests_executed++; } } if (teardown != NULL) { int failed; failed = 
_run_test(teardown_name, teardown, current_state, UNIT_TEST_FUNCTION_TYPE_GROUP_TEARDOWN, group_state.check_point); if (failed) { failed_names[total_failed] = teardown_name; } total_failed += failed; tests_executed++; } print_message("[==========] %"PRIdS " test(s) run.\n", tests_executed); print_error("[ PASSED ] %"PRIdS " test(s).\n", tests_executed - total_failed); if (total_failed) { print_error("[ FAILED ] %"PRIdS " test(s), listed below:\n", total_failed); for (i = 0; i < total_failed; i++) { print_error("[ FAILED ] %s\n", failed_names[i]); } } else { print_error("\n %"PRIdS " FAILED TEST(S)\n", total_failed); } free((void*)failed_names); fail_if_blocks_allocated(check_point, "run_group_tests"); return (int)total_failed; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0092094 tevent-0.11.0/third_party/cmocka/cmocka.h0000660000000000000000000021622700000000000020241 0ustar00rootroot00000000000000/* * Copyright 2008 Google Inc. * Copyright 2014-2018 Andreas Schneider * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CMOCKA_H_ #define CMOCKA_H_ #ifdef _WIN32 # ifdef _MSC_VER #define __func__ __FUNCTION__ # ifndef inline #define inline __inline # endif /* inline */ # if _MSC_VER < 1500 # ifdef __cplusplus extern "C" { # endif /* __cplusplus */ int __stdcall IsDebuggerPresent(); # ifdef __cplusplus } /* extern "C" */ # endif /* __cplusplus */ # endif /* _MSC_VER < 1500 */ # endif /* _MSC_VER */ #endif /* _WIN32 */ /** * @defgroup cmocka The CMocka API * * These headers or their equivalents should be included prior to including * this header file. * @code * #include * #include * #include * @endcode * * This allows test applications to use custom definitions of C standard * library functions and types. * * @{ */ /* If __WORDSIZE is not set, try to figure it out and default to 32 bit. */ #ifndef __WORDSIZE # if (defined(__x86_64__) && !defined(__ILP32__)) || defined(__sparc_v9__) || defined(__sparcv9) # define __WORDSIZE 64 # else # define __WORDSIZE 32 # endif #endif #ifdef DOXYGEN /** * Largest integral type. This type should be large enough to hold any * pointer or integer supported by the compiler. */ typedef uintmax_t LargestIntegralType; #else /* DOXGEN */ #ifndef LargestIntegralType # if __WORDSIZE == 64 && !defined(_WIN64) # define LargestIntegralType unsigned long int # else # define LargestIntegralType unsigned long long int # endif #endif /* LargestIntegralType */ #endif /* DOXYGEN */ /* Printf format used to display LargestIntegralType as a hexidecimal. */ #ifndef LargestIntegralTypePrintfFormat # ifdef _WIN32 # define LargestIntegralTypePrintfFormat "0x%I64x" # else # if __WORDSIZE == 64 # define LargestIntegralTypePrintfFormat "%#lx" # else # define LargestIntegralTypePrintfFormat "%#llx" # endif # endif /* _WIN32 */ #endif /* LargestIntegralTypePrintfFormat */ /* Printf format used to display LargestIntegralType as a decimal. 
*/ #ifndef LargestIntegralTypePrintfFormatDecimal # ifdef _WIN32 # define LargestIntegralTypePrintfFormatDecimal "%I64u" # else # if __WORDSIZE == 64 # define LargestIntegralTypePrintfFormatDecimal "%lu" # else # define LargestIntegralTypePrintfFormatDecimal "%llu" # endif # endif /* _WIN32 */ #endif /* LargestIntegralTypePrintfFormat */ /* Perform an unsigned cast to LargestIntegralType. */ #define cast_to_largest_integral_type(value) \ ((LargestIntegralType)(value)) /* Smallest integral type capable of holding a pointer. */ #if !defined(_UINTPTR_T) && !defined(_UINTPTR_T_DEFINED) # if defined(_WIN32) /* WIN32 is an ILP32 platform */ typedef unsigned int uintptr_t; # elif defined(_WIN64) typedef unsigned long int uintptr_t # else /* _WIN32 */ /* ILP32 and LP64 platforms */ # ifdef __WORDSIZE /* glibc */ # if __WORDSIZE == 64 typedef unsigned long int uintptr_t; # else typedef unsigned int uintptr_t; # endif /* __WORDSIZE == 64 */ # else /* __WORDSIZE */ # if defined(_LP64) || defined(_I32LPx) typedef unsigned long int uintptr_t; # else typedef unsigned int uintptr_t; # endif # endif /* __WORDSIZE */ # endif /* _WIN32 */ # define _UINTPTR_T # define _UINTPTR_T_DEFINED #endif /* !defined(_UINTPTR_T) || !defined(_UINTPTR_T_DEFINED) */ /* Perform an unsigned cast to uintptr_t. */ #define cast_to_pointer_integral_type(value) \ ((uintptr_t)((size_t)(value))) /* Perform a cast of a pointer to LargestIntegralType */ #define cast_ptr_to_largest_integral_type(value) \ cast_to_largest_integral_type(cast_to_pointer_integral_type(value)) /* GCC have printf type attribute check. */ #ifdef __GNUC__ #define CMOCKA_PRINTF_ATTRIBUTE(a,b) \ __attribute__ ((__format__ (__printf__, a, b))) #else #define CMOCKA_PRINTF_ATTRIBUTE(a,b) #endif /* __GNUC__ */ #if defined(__GNUC__) #define CMOCKA_DEPRECATED __attribute__ ((deprecated)) #elif defined(_MSC_VER) #define CMOCKA_DEPRECATED __declspec(deprecated) #else #define CMOCKA_DEPRECATED #endif #define WILL_RETURN_ALWAYS -1 #define WILL_RETURN_ONCE -2 /** * @defgroup cmocka_mock Mock Objects * @ingroup cmocka * * Mock objects mock objects are simulated objects that mimic the behavior of * real objects. Instead of calling the real objects, the tested object calls a * mock object that merely asserts that the correct methods were called, with * the expected parameters, in the correct order. * *
 * - will_return(function, value) - The will_return() macro pushes a value
 *   onto a stack of mock values. This macro is intended to be used by the
 *   unit test itself, while programming the behaviour of the mocked
 *   object.
 *
 * - mock() - the mock macro pops a value from a stack of test values. The
 *   user of the mock() macro is the mocked object that uses it to learn
 *   how it should behave.
* * Because the will_return() and mock() are intended to be used in pairs, the * cmocka library would fail the test if there are more values pushed onto the * stack using will_return() than consumed with mock() and vice-versa. * * The following unit test stub illustrates how would a unit test instruct the * mock object to return a particular value: * * @code * will_return(chef_cook, "hotdog"); * will_return(chef_cook, 0); * @endcode * * Now the mock object can check if the parameter it received is the parameter * which is expected by the test driver. This can be done the following way: * * @code * int chef_cook(const char *order, char **dish_out) * { * check_expected(order); * } * @endcode * * For a complete example please at a look * here. * * @{ */ #ifdef DOXYGEN /** * @brief Retrieve a return value of the current function. * * @return The value which was stored to return by this function. * * @see will_return() */ LargestIntegralType mock(void); #else #define mock() _mock(__func__, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Retrieve a typed return value of the current function. * * The value would be casted to type internally to avoid having the * caller to do the cast manually. * * @param[in] #type The expected type of the return value * * @return The value which was stored to return by this function. * * @code * int param; * * param = mock_type(int); * @endcode * * @see will_return() * @see mock() * @see mock_ptr_type() */ #type mock_type(#type); #else #define mock_type(type) ((type) mock()) #endif #ifdef DOXYGEN /** * @brief Retrieve a typed return value of the current function. * * The value would be casted to type internally to avoid having the * caller to do the cast manually but also casted to uintptr_t to make * sure the result has a valid size to be used as a pointer. * * @param[in] #type The expected type of the return value * * @return The value which was stored to return by this function. * * @code * char *param; * * param = mock_ptr_type(char *); * @endcode * * @see will_return() * @see mock() * @see mock_type() */ type mock_ptr_type(#type); #else #define mock_ptr_type(type) ((type) (uintptr_t) mock()) #endif #ifdef DOXYGEN /** * @brief Store a value to be returned by mock() later. * * @param[in] #function The function which should return the given value. * * @param[in] value The value to be returned by mock(). * * @code * int return_integer(void) * { * return (int)mock(); * } * * static void test_integer_return(void **state) * { * will_return(return_integer, 42); * * assert_int_equal(my_function_calling_return_integer(), 42); * } * @endcode * * @see mock() * @see will_return_count() */ void will_return(#function, LargestIntegralType value); #else #define will_return(function, value) \ _will_return(#function, __FILE__, __LINE__, \ cast_to_largest_integral_type(value), 1) #endif #ifdef DOXYGEN /** * @brief Store a value to be returned by mock() later. * * @param[in] #function The function which should return the given value. * * @param[in] value The value to be returned by mock(). * * @param[in] count The parameter indicates the number of times the value should * be returned by mock(). If count is set to -1, the value * will always be returned but must be returned at least once. * If count is set to -2, the value will always be returned * by mock(), but is not required to be returned. 
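 *
 * As an illustrative sketch (read_byte() and the test below are assumed for
 * this example and are not part of cmocka), a value can be queued three
 * times and consumed by three mock() calls:
 *
 * @code
 * int read_byte(void)
 * {
 *      return (int) mock();
 * }
 *
 * static void test_three_reads(void **state)
 * {
 *      (void) state;
 *
 *      will_return_count(read_byte, 0x2a, 3);
 *
 *      assert_int_equal(read_byte(), 0x2a);
 *      assert_int_equal(read_byte(), 0x2a);
 *      assert_int_equal(read_byte(), 0x2a);
 * }
 * @endcode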
* * @see mock() */ void will_return_count(#function, LargestIntegralType value, int count); #else #define will_return_count(function, value, count) \ _will_return(#function, __FILE__, __LINE__, \ cast_to_largest_integral_type(value), count) #endif #ifdef DOXYGEN /** * @brief Store a value that will be always returned by mock(). * * @param[in] #function The function which should return the given value. * * @param[in] #value The value to be returned by mock(). * * This is equivalent to: * @code * will_return_count(function, value, -1); * @endcode * * @see will_return_count() * @see mock() */ void will_return_always(#function, LargestIntegralType value); #else #define will_return_always(function, value) \ will_return_count(function, (value), WILL_RETURN_ALWAYS) #endif #ifdef DOXYGEN /** * @brief Store a value that may be always returned by mock(). * * This stores a value which will always be returned by mock() but is not * required to be returned by at least one call to mock(). Therefore, * in contrast to will_return_always() which causes a test failure if it * is not returned at least once, will_return_maybe() will never cause a test * to fail if its value is not returned. * * @param[in] #function The function which should return the given value. * * @param[in] #value The value to be returned by mock(). * * This is equivalent to: * @code * will_return_count(function, value, -2); * @endcode * * @see will_return_count() * @see mock() */ void will_return_maybe(#function, LargestIntegralType value); #else #define will_return_maybe(function, value) \ will_return_count(function, (value), WILL_RETURN_ONCE) #endif /** @} */ /** * @defgroup cmocka_param Checking Parameters * @ingroup cmocka * * Functionality to store expected values for mock function parameters. * * In addition to storing the return values of mock functions, cmocka provides * functionality to store expected values for mock function parameters using * the expect_*() functions provided. A mock function parameter can then be * validated using the check_expected() macro. * * Successive calls to expect_*() macros for a parameter queues values to check * the specified parameter. check_expected() checks a function parameter * against the next value queued using expect_*(), if the parameter check fails * a test failure is signalled. In addition if check_expected() is called and * no more parameter values are queued a test failure occurs. * * The following test stub illustrates how to do this. First is the the function * we call in the test driver: * * @code * static void test_driver(void **state) * { * expect_string(chef_cook, order, "hotdog"); * } * @endcode * * Now the chef_cook function can check if the parameter we got passed is the * parameter which is expected by the test driver. This can be done the * following way: * * @code * int chef_cook(const char *order, char **dish_out) * { * check_expected(order); * } * @endcode * * For a complete example please at a look at * here * * @{ */ /* * Add a custom parameter checking function. If the event parameter is NULL * the event structure is allocated internally by this function. If event * parameter is provided it must be allocated on the heap and doesn't need to * be deallocated by the caller. */ #ifdef DOXYGEN /** * @brief Add a custom parameter checking function. * * If the event parameter is NULL the event structure is allocated internally * by this function. If the parameter is provided it must be allocated on the * heap and doesn't need to be deallocated by the caller. 
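 *
 * A minimal sketch of a custom check (check_is_even() and send_command()
 * below are assumed for this example; the check function follows the
 * CheckParameterValue prototype and returns non-zero when the parameter is
 * acceptable):
 *
 * @code
 * static int check_is_even(const LargestIntegralType value,
 *                          const LargestIntegralType check_value_data)
 * {
 *      (void) check_value_data;
 *      return (value % 2) == 0;  // non-zero means the check passed
 * }
 *
 * int send_command(int code)
 * {
 *      check_expected(code);
 *      return 0;
 * }
 *
 * static void test_even_command_code(void **state)
 * {
 *      (void) state;
 *      expect_check(send_command, code, check_is_even, 0);
 *      assert_int_equal(send_command(4), 0);
 * }
 * @endcode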
* * @param[in] #function The function to add a custom parameter checking * function for. * * @param[in] #parameter The parameters passed to the function. * * @param[in] #check_function The check function to call. * * @param[in] check_data The data to pass to the check function. */ void expect_check(#function, #parameter, #check_function, const void *check_data); #else #define expect_check(function, parameter, check_function, check_data) \ _expect_check(#function, #parameter, __FILE__, __LINE__, check_function, \ cast_to_largest_integral_type(check_data), NULL, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value is part of the provided * array. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value_array[] The array to check for the value. * * @see check_expected(). */ void expect_in_set(#function, #parameter, LargestIntegralType value_array[]); #else #define expect_in_set(function, parameter, value_array) \ expect_in_set_count(function, parameter, value_array, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value is part of the provided * array. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value_array[] The array to check for the value. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_in_set_count(#function, #parameter, LargestIntegralType value_array[], size_t count); #else #define expect_in_set_count(function, parameter, value_array, count) \ _expect_in_set(#function, #parameter, __FILE__, __LINE__, value_array, \ sizeof(value_array) / sizeof((value_array)[0]), count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value is not part of the * provided array. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value_array[] The array to check for the value. * * @see check_expected(). */ void expect_not_in_set(#function, #parameter, LargestIntegralType value_array[]); #else #define expect_not_in_set(function, parameter, value_array) \ expect_not_in_set_count(function, parameter, value_array, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value is not part of the * provided array. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value_array[] The array to check for the value. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). 
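 *
 * A short sketch (set_speed() and the reserved values are assumed for this
 * example) showing how a parameter can be checked against a set of
 * forbidden values:
 *
 * @code
 * void set_speed(int speed)
 * {
 *      check_expected(speed);
 * }
 *
 * static void test_speed_is_never_reserved(void **state)
 * {
 *      static const LargestIntegralType reserved[] = { 0, 255 };
 *
 *      (void) state;
 *      expect_not_in_set(set_speed, speed, reserved);
 *      set_speed(100);
 * }
 * @endcode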
*/ void expect_not_in_set_count(#function, #parameter, LargestIntegralType value_array[], size_t count); #else #define expect_not_in_set_count(function, parameter, value_array, count) \ _expect_not_in_set( \ #function, #parameter, __FILE__, __LINE__, value_array, \ sizeof(value_array) / sizeof((value_array)[0]), count) #endif #ifdef DOXYGEN /** * @brief Add an event to check a parameter is inside a numerical range. * The check would succeed if minimum <= value <= maximum. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] minimum The lower boundary of the interval to check against. * * @param[in] maximum The upper boundary of the interval to check against. * * @see check_expected(). */ void expect_in_range(#function, #parameter, LargestIntegralType minimum, LargestIntegralType maximum); #else #define expect_in_range(function, parameter, minimum, maximum) \ expect_in_range_count(function, parameter, minimum, maximum, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check a parameter is inside a * numerical range. The check would succeed if minimum <= value <= maximum. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] minimum The lower boundary of the interval to check against. * * @param[in] maximum The upper boundary of the interval to check against. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_in_range_count(#function, #parameter, LargestIntegralType minimum, LargestIntegralType maximum, size_t count); #else #define expect_in_range_count(function, parameter, minimum, maximum, count) \ _expect_in_range(#function, #parameter, __FILE__, __LINE__, minimum, \ maximum, count) #endif #ifdef DOXYGEN /** * @brief Add an event to check a parameter is outside a numerical range. * The check would succeed if minimum > value > maximum. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] minimum The lower boundary of the interval to check against. * * @param[in] maximum The upper boundary of the interval to check against. * * @see check_expected(). */ void expect_not_in_range(#function, #parameter, LargestIntegralType minimum, LargestIntegralType maximum); #else #define expect_not_in_range(function, parameter, minimum, maximum) \ expect_not_in_range_count(function, parameter, minimum, maximum, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check a parameter is outside a * numerical range. The check would succeed if minimum > value > maximum. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] minimum The lower boundary of the interval to check against. * * @param[in] maximum The upper boundary of the interval to check against. 
* * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_not_in_range_count(#function, #parameter, LargestIntegralType minimum, LargestIntegralType maximum, size_t count); #else #define expect_not_in_range_count(function, parameter, minimum, maximum, \ count) \ _expect_not_in_range(#function, #parameter, __FILE__, __LINE__, \ minimum, maximum, count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if a parameter is the given value. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value The value to check. * * @see check_expected(). */ void expect_value(#function, #parameter, LargestIntegralType value); #else #define expect_value(function, parameter, value) \ expect_value_count(function, parameter, value, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check if a parameter is the given value. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value The value to check. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_value_count(#function, #parameter, LargestIntegralType value, size_t count); #else #define expect_value_count(function, parameter, value, count) \ _expect_value(#function, #parameter, __FILE__, __LINE__, \ cast_to_largest_integral_type(value), count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if a parameter isn't the given value. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value The value to check. * * @see check_expected(). */ void expect_not_value(#function, #parameter, LargestIntegralType value); #else #define expect_not_value(function, parameter, value) \ expect_not_value_count(function, parameter, value, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check if a parameter isn't the given value. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] value The value to check. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_not_value_count(#function, #parameter, LargestIntegralType value, size_t count); #else #define expect_not_value_count(function, parameter, value, count) \ _expect_not_value(#function, #parameter, __FILE__, __LINE__, \ cast_to_largest_integral_type(value), count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value is equal to the * provided string. * * The event is triggered by calling check_expected() in the mocked function. 
* * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] string The string value to compare. * * @see check_expected(). */ void expect_string(#function, #parameter, const char *string); #else #define expect_string(function, parameter, string) \ expect_string_count(function, parameter, string, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value is equal to the * provided string. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] string The string value to compare. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_string_count(#function, #parameter, const char *string, size_t count); #else #define expect_string_count(function, parameter, string, count) \ _expect_string(#function, #parameter, __FILE__, __LINE__, \ (const char*)(string), count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value isn't equal to the * provided string. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] string The string value to compare. * * @see check_expected(). */ void expect_not_string(#function, #parameter, const char *string); #else #define expect_not_string(function, parameter, string) \ expect_not_string_count(function, parameter, string, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter value isn't equal to the * provided string. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] string The string value to compare. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_not_string_count(#function, #parameter, const char *string, size_t count); #else #define expect_not_string_count(function, parameter, string, count) \ _expect_not_string(#function, #parameter, __FILE__, __LINE__, \ (const char*)(string), count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter does match an area of memory. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] memory The memory to compare. * * @param[in] size The size of the memory to compare. * * @see check_expected(). */ void expect_memory(#function, #parameter, void *memory, size_t size); #else #define expect_memory(function, parameter, memory, size) \ expect_memory_count(function, parameter, memory, size, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check if the parameter does match an area * of memory. * * The event is triggered by calling check_expected() in the mocked function. 
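 *
 * A brief sketch (write_block() and the buffer below are assumed for this
 * example) where the same memory contents are expected for two calls:
 *
 * @code
 * void write_block(const void *buf, size_t len)
 * {
 *      (void) len;
 *      check_expected_ptr(buf);
 * }
 *
 * static void test_block_contents(void **state)
 * {
 *      static const char expected[4] = { 'a', 'b', 'c', 'd' };
 *
 *      (void) state;
 *      expect_memory_count(write_block, buf, expected, sizeof(expected), 2);
 *      write_block(expected, sizeof(expected));
 *      write_block(expected, sizeof(expected));
 * }
 * @endcode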
* * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] memory The memory to compare. * * @param[in] size The size of the memory to compare. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_memory_count(#function, #parameter, void *memory, size_t size, size_t count); #else #define expect_memory_count(function, parameter, memory, size, count) \ _expect_memory(#function, #parameter, __FILE__, __LINE__, \ (const void*)(memory), size, count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if the parameter doesn't match an area of * memory. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] memory The memory to compare. * * @param[in] size The size of the memory to compare. * * @see check_expected(). */ void expect_not_memory(#function, #parameter, void *memory, size_t size); #else #define expect_not_memory(function, parameter, memory, size) \ expect_not_memory_count(function, parameter, memory, size, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check if the parameter doesn't match an * area of memory. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] memory The memory to compare. * * @param[in] size The size of the memory to compare. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). */ void expect_not_memory_count(#function, #parameter, void *memory, size_t size, size_t count); #else #define expect_not_memory_count(function, parameter, memory, size, count) \ _expect_not_memory(#function, #parameter, __FILE__, __LINE__, \ (const void*)(memory), size, count) #endif #ifdef DOXYGEN /** * @brief Add an event to check if a parameter (of any value) has been passed. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @see check_expected(). */ void expect_any(#function, #parameter); #else #define expect_any(function, parameter) \ expect_any_count(function, parameter, 1) #endif #ifdef DOXYGEN /** * @brief Add an event to repeatedly check if a parameter (of any value) has * been passed. * * The event is triggered by calling check_expected() in the mocked function. * * @param[in] #function The function to add the check for. * * @param[in] #parameter The name of the parameter passed to the function. * * @param[in] count The count parameter returns the number of times the value * should be returned by check_expected(). If count is set * to -1 the value will always be returned. * * @see check_expected(). 
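 *
 * For instance, a sketch (log_event() is assumed for this example) that
 * only verifies the parameter was passed twice, whatever its value:
 *
 * @code
 * void log_event(int code)
 * {
 *      check_expected(code);
 * }
 *
 * static void test_logging_happens_twice(void **state)
 * {
 *      (void) state;
 *      expect_any_count(log_event, code, 2);
 *      log_event(1);
 *      log_event(99);
 * }
 * @endcode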
*/ void expect_any_count(#function, #parameter, size_t count); #else #define expect_any_count(function, parameter, count) \ _expect_any(#function, #parameter, __FILE__, __LINE__, count) #endif #ifdef DOXYGEN /** * @brief Determine whether a function parameter is correct. * * This ensures the next value queued by one of the expect_*() macros matches * the specified variable. * * This function needs to be called in the mock object. * * @param[in] #parameter The parameter to check. */ void check_expected(#parameter); #else #define check_expected(parameter) \ _check_expected(__func__, #parameter, __FILE__, __LINE__, \ cast_to_largest_integral_type(parameter)) #endif #ifdef DOXYGEN /** * @brief Determine whether a function parameter is correct. * * This ensures the next value queued by one of the expect_*() macros matches * the specified variable. * * This function needs to be called in the mock object. * * @param[in] #parameter The pointer to check. */ void check_expected_ptr(#parameter); #else #define check_expected_ptr(parameter) \ _check_expected(__func__, #parameter, __FILE__, __LINE__, \ cast_ptr_to_largest_integral_type(parameter)) #endif /** @} */ /** * @defgroup cmocka_asserts Assert Macros * @ingroup cmocka * * This is a set of useful assert macros like the standard C libary's * assert(3) macro. * * On an assertion failure a cmocka assert macro will write the failure to the * standard error stream and signal a test failure. Due to limitations of the C * language the general C standard library assert() and cmocka's assert_true() * and assert_false() macros can only display the expression that caused the * assert failure. cmocka's type specific assert macros, assert_{type}_equal() * and assert_{type}_not_equal(), display the data that caused the assertion * failure which increases data visibility aiding debugging of failing test * cases. * * @{ */ #ifdef DOXYGEN /** * @brief Assert that the given expression is true. * * The function prints an error message to standard error and terminates the * test by calling fail() if expression is false (i.e., compares equal to * zero). * * @param[in] expression The expression to evaluate. * * @see assert_int_equal() * @see assert_string_equal() */ void assert_true(scalar expression); #else #define assert_true(c) _assert_true(cast_to_largest_integral_type(c), #c, \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the given expression is false. * * The function prints an error message to standard error and terminates the * test by calling fail() if expression is true. * * @param[in] expression The expression to evaluate. * * @see assert_int_equal() * @see assert_string_equal() */ void assert_false(scalar expression); #else #define assert_false(c) _assert_true(!(cast_to_largest_integral_type(c)), #c, \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the return_code is greater than or equal to 0. * * The function prints an error message to standard error and terminates the * test by calling fail() if the return code is smaller than 0. If the function * you check sets an errno if it fails you can pass it to the function and * it will be printed as part of the error message. * * @param[in] rc The return code to evaluate. * * @param[in] error Pass errno here or 0. 
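 *
 * A small usage sketch (the open(2)/close(2) calls are only an example and
 * would additionally require fcntl.h, unistd.h and errno.h):
 *
 * @code
 * static void test_open_dev_null(void **state)
 * {
 *      int fd;
 *
 *      (void) state;
 *      fd = open("/dev/null", O_RDONLY);
 *      assert_return_code(fd, errno);
 *      close(fd);
 * }
 * @endcode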
*/ void assert_return_code(int rc, int error); #else #define assert_return_code(rc, error) \ _assert_return_code(cast_to_largest_integral_type(rc), \ sizeof(rc), \ cast_to_largest_integral_type(error), \ #rc, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the given pointer is non-NULL. * * The function prints an error message to standard error and terminates the * test by calling fail() if the pointer is NULL. * * @param[in] pointer The pointer to evaluate. * * @see assert_null() */ void assert_non_null(void *pointer); #else #define assert_non_null(c) _assert_true(cast_ptr_to_largest_integral_type(c), #c, \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the given pointer is NULL. * * The function prints an error message to standard error and terminates the * test by calling fail() if the pointer is non-NULL. * * @param[in] pointer The pointer to evaluate. * * @see assert_non_null() */ void assert_null(void *pointer); #else #define assert_null(c) _assert_true(!(cast_ptr_to_largest_integral_type(c)), #c, \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given pointers are equal. * * The function prints an error message and terminates the test by calling * fail() if the pointers are not equal. * * @param[in] a The first pointer to compare. * * @param[in] b The pointer to compare against the first one. */ void assert_ptr_equal(void *a, void *b); #else #define assert_ptr_equal(a, b) \ _assert_int_equal(cast_ptr_to_largest_integral_type(a), \ cast_ptr_to_largest_integral_type(b), \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given pointers are not equal. * * The function prints an error message and terminates the test by calling * fail() if the pointers are equal. * * @param[in] a The first pointer to compare. * * @param[in] b The pointer to compare against the first one. */ void assert_ptr_not_equal(void *a, void *b); #else #define assert_ptr_not_equal(a, b) \ _assert_int_not_equal(cast_ptr_to_largest_integral_type(a), \ cast_ptr_to_largest_integral_type(b), \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given integers are equal. * * The function prints an error message to standard error and terminates the * test by calling fail() if the integers are not equal. * * @param[in] a The first integer to compare. * * @param[in] b The integer to compare against the first one. */ void assert_int_equal(int a, int b); #else #define assert_int_equal(a, b) \ _assert_int_equal(cast_to_largest_integral_type(a), \ cast_to_largest_integral_type(b), \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given integers are not equal. * * The function prints an error message to standard error and terminates the * test by calling fail() if the integers are equal. * * @param[in] a The first integer to compare. * * @param[in] b The integer to compare against the first one. * * @see assert_int_equal() */ void assert_int_not_equal(int a, int b); #else #define assert_int_not_equal(a, b) \ _assert_int_not_equal(cast_to_largest_integral_type(a), \ cast_to_largest_integral_type(b), \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given strings are equal. * * The function prints an error message to standard error and terminates the * test by calling fail() if the strings are not equal. * * @param[in] a The string to check. * * @param[in] b The other string to compare. 
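 *
 * For example (make_greeting() is an assumed helper under test, not part of
 * cmocka):
 *
 * @code
 * static void test_greeting(void **state)
 * {
 *      const char *greeting = make_greeting("World");
 *
 *      (void) state;
 *      assert_string_equal(greeting, "Hello World");
 * }
 * @endcode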
*/ void assert_string_equal(const char *a, const char *b); #else #define assert_string_equal(a, b) \ _assert_string_equal((const char*)(a), (const char*)(b), __FILE__, \ __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given strings are not equal. * * The function prints an error message to standard error and terminates the * test by calling fail() if the strings are equal. * * @param[in] a The string to check. * * @param[in] b The other string to compare. */ void assert_string_not_equal(const char *a, const char *b); #else #define assert_string_not_equal(a, b) \ _assert_string_not_equal((const char*)(a), (const char*)(b), __FILE__, \ __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given areas of memory are equal, otherwise fail. * * The function prints an error message to standard error and terminates the * test by calling fail() if the memory is not equal. * * @param[in] a The first memory area to compare * (interpreted as unsigned char). * * @param[in] b The second memory area to compare * (interpreted as unsigned char). * * @param[in] size The first n bytes of the memory areas to compare. */ void assert_memory_equal(const void *a, const void *b, size_t size); #else #define assert_memory_equal(a, b, size) \ _assert_memory_equal((const void*)(a), (const void*)(b), size, __FILE__, \ __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the two given areas of memory are not equal. * * The function prints an error message to standard error and terminates the * test by calling fail() if the memory is equal. * * @param[in] a The first memory area to compare * (interpreted as unsigned char). * * @param[in] b The second memory area to compare * (interpreted as unsigned char). * * @param[in] size The first n bytes of the memory areas to compare. */ void assert_memory_not_equal(const void *a, const void *b, size_t size); #else #define assert_memory_not_equal(a, b, size) \ _assert_memory_not_equal((const void*)(a), (const void*)(b), size, \ __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the specified value is not smaller than the minimum * and and not greater than the maximum. * * The function prints an error message to standard error and terminates the * test by calling fail() if value is not in range. * * @param[in] value The value to check. * * @param[in] minimum The minimum value allowed. * * @param[in] maximum The maximum value allowed. */ void assert_in_range(LargestIntegralType value, LargestIntegralType minimum, LargestIntegralType maximum); #else #define assert_in_range(value, minimum, maximum) \ _assert_in_range( \ cast_to_largest_integral_type(value), \ cast_to_largest_integral_type(minimum), \ cast_to_largest_integral_type(maximum), __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the specified value is smaller than the minimum or * greater than the maximum. * * The function prints an error message to standard error and terminates the * test by calling fail() if value is in range. * * @param[in] value The value to check. * * @param[in] minimum The minimum value to compare. * * @param[in] maximum The maximum value to compare. 
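 *
 * A usage sketch (read_temperature() is an assumed helper under test):
 *
 * @code
 * static void test_temperature_is_sane(void **state)
 * {
 *      int t = read_temperature();
 *
 *      (void) state;
 *      assert_in_range(t, 10, 40);
 *      assert_not_in_range(t, 100, 200);
 * }
 * @endcode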
*/ void assert_not_in_range(LargestIntegralType value, LargestIntegralType minimum, LargestIntegralType maximum); #else #define assert_not_in_range(value, minimum, maximum) \ _assert_not_in_range( \ cast_to_largest_integral_type(value), \ cast_to_largest_integral_type(minimum), \ cast_to_largest_integral_type(maximum), __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the specified value is within a set. * * The function prints an error message to standard error and terminates the * test by calling fail() if value is not within a set. * * @param[in] value The value to look up * * @param[in] values[] The array to check for the value. * * @param[in] count The size of the values array. */ void assert_in_set(LargestIntegralType value, LargestIntegralType values[], size_t count); #else #define assert_in_set(value, values, number_of_values) \ _assert_in_set(value, values, number_of_values, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Assert that the specified value is not within a set. * * The function prints an error message to standard error and terminates the * test by calling fail() if value is within a set. * * @param[in] value The value to look up * * @param[in] values[] The array to check for the value. * * @param[in] count The size of the values array. */ void assert_not_in_set(LargestIntegralType value, LargestIntegralType values[], size_t count); #else #define assert_not_in_set(value, values, number_of_values) \ _assert_not_in_set(value, values, number_of_values, __FILE__, __LINE__) #endif /** @} */ /** * @defgroup cmocka_call_order Call Ordering * @ingroup cmocka * * It is often beneficial to make sure that functions are called in an * order. This is independent of mock returns and parameter checking as both * of the aforementioned do not check the order in which they are called from * different functions. * *
 *
 * - expect_function_call(function) - The expect_function_call() macro pushes
 *   an expectation onto the stack of expected calls.
 *
 * - function_called() - pops a value from the stack of expected calls.
 *   function_called() is invoked within the mock object that uses it.
* * expect_function_call() and function_called() are intended to be used in * pairs. Cmocka will fail a test if there are more or less expected calls * created (e.g. expect_function_call()) than consumed with function_called(). * There are provisions such as ignore_function_calls() which allow this * restriction to be circumvented in tests where mock calls for the code under * test are not the focus of the test. * * The following example illustrates how a unit test instructs cmocka * to expect a function_called() from a particular mock, * chef_sing(): * * @code * void chef_sing(void); * * void code_under_test() * { * chef_sing(); * } * * void some_test(void **state) * { * expect_function_call(chef_sing); * code_under_test(); * } * @endcode * * The implementation of the mock then must check whether it was meant to * be called by invoking function_called(): * * @code * void chef_sing() * { * function_called(); * } * @endcode * * @{ */ #ifdef DOXYGEN /** * @brief Check that current mocked function is being called in the expected * order * * @see expect_function_call() */ void function_called(void); #else #define function_called() _function_called(__func__, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Store expected call(s) to a mock to be checked by function_called() * later. * * @param[in] #function The function which should should be called * * @param[in] times number of times this mock must be called * * @see function_called() */ void expect_function_calls(#function, const int times); #else #define expect_function_calls(function, times) \ _expect_function_call(#function, __FILE__, __LINE__, times) #endif #ifdef DOXYGEN /** * @brief Store expected single call to a mock to be checked by * function_called() later. * * @param[in] #function The function which should should be called * * @see function_called() */ void expect_function_call(#function); #else #define expect_function_call(function) \ _expect_function_call(#function, __FILE__, __LINE__, 1) #endif #ifdef DOXYGEN /** * @brief Expects function_called() from given mock at least once * * @param[in] #function The function which should should be called * * @see function_called() */ void expect_function_call_any(#function); #else #define expect_function_call_any(function) \ _expect_function_call(#function, __FILE__, __LINE__, -1) #endif #ifdef DOXYGEN /** * @brief Ignores function_called() invocations from given mock function. * * @param[in] #function The function which should should be called * * @see function_called() */ void ignore_function_calls(#function); #else #define ignore_function_calls(function) \ _expect_function_call(#function, __FILE__, __LINE__, -2) #endif /** @} */ /** * @defgroup cmocka_exec Running Tests * @ingroup cmocka * * This is the way tests are executed with CMocka. * * The following example illustrates this macro's use with the unit_test macro. * * @code * void Test0(void **state); * void Test1(void **state); * * int main(void) * { * const struct CMUnitTest tests[] = { * cmocka_unit_test(Test0), * cmocka_unit_test(Test1), * }; * * return cmocka_run_group_tests(tests, NULL, NULL); * } * @endcode * * @{ */ #ifdef DOXYGEN /** * @brief Forces the test to fail immediately and quit. */ void fail(void); #else #define fail() _fail(__FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Forces the test to not be executed, but marked as skipped */ void skip(void); #else #define skip() _skip(__FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Forces the test to fail immediately and quit, printing the reason. 
* * @code * fail_msg("This is some error message for test"); * @endcode * * or * * @code * char *error_msg = "This is some error message for test"; * fail_msg("%s", error_msg); * @endcode */ void fail_msg(const char *msg, ...); #else #define fail_msg(msg, ...) do { \ print_error("ERROR: " msg "\n", ##__VA_ARGS__); \ fail(); \ } while (0) #endif #ifdef DOXYGEN /** * @brief Generic method to run a single test. * * @deprecated This function was deprecated in favor of cmocka_run_group_tests * * @param[in] #function The function to test. * * @return 0 on success, 1 if an error occured. * * @code * // A test case that does nothing and succeeds. * void null_test_success(void **state) { * } * * int main(void) { * return run_test(null_test_success); * } * @endcode */ int run_test(#function); #else #define run_test(f) _run_test(#f, f, NULL, UNIT_TEST_FUNCTION_TYPE_TEST, NULL) #endif static inline void _unit_test_dummy(void **state) { (void)state; } /** Initializes a UnitTest structure. * * @deprecated This function was deprecated in favor of cmocka_unit_test */ #define unit_test(f) { #f, f, UNIT_TEST_FUNCTION_TYPE_TEST } #define _unit_test_setup(test, setup) \ { #test "_" #setup, setup, UNIT_TEST_FUNCTION_TYPE_SETUP } /** Initializes a UnitTest structure with a setup function. * * @deprecated This function was deprecated in favor of cmocka_unit_test_setup */ #define unit_test_setup(test, setup) \ _unit_test_setup(test, setup), \ unit_test(test), \ _unit_test_teardown(test, _unit_test_dummy) #define _unit_test_teardown(test, teardown) \ { #test "_" #teardown, teardown, UNIT_TEST_FUNCTION_TYPE_TEARDOWN } /** Initializes a UnitTest structure with a teardown function. * * @deprecated This function was deprecated in favor of cmocka_unit_test_teardown */ #define unit_test_teardown(test, teardown) \ _unit_test_setup(test, _unit_test_dummy), \ unit_test(test), \ _unit_test_teardown(test, teardown) /** Initializes a UnitTest structure for a group setup function. * * @deprecated This function was deprecated in favor of cmocka_run_group_tests */ #define group_test_setup(setup) \ { "group_" #setup, setup, UNIT_TEST_FUNCTION_TYPE_GROUP_SETUP } /** Initializes a UnitTest structure for a group teardown function. * * @deprecated This function was deprecated in favor of cmocka_run_group_tests */ #define group_test_teardown(teardown) \ { "group_" #teardown, teardown, UNIT_TEST_FUNCTION_TYPE_GROUP_TEARDOWN } /** * Initialize an array of UnitTest structures with a setup function for a test * and a teardown function. Either setup or teardown can be NULL. * * @deprecated This function was deprecated in favor of * cmocka_unit_test_setup_teardown */ #define unit_test_setup_teardown(test, setup, teardown) \ _unit_test_setup(test, setup), \ unit_test(test), \ _unit_test_teardown(test, teardown) /** Initializes a CMUnitTest structure. */ #define cmocka_unit_test(f) { #f, f, NULL, NULL, NULL } /** Initializes a CMUnitTest structure with a setup function. */ #define cmocka_unit_test_setup(f, setup) { #f, f, setup, NULL, NULL } /** Initializes a CMUnitTest structure with a teardown function. */ #define cmocka_unit_test_teardown(f, teardown) { #f, f, NULL, teardown, NULL } /** * Initialize an array of CMUnitTest structures with a setup function for a test * and a teardown function. Either setup or teardown can be NULL. */ #define cmocka_unit_test_setup_teardown(f, setup, teardown) { #f, f, setup, teardown, NULL } /** * Initialize a CMUnitTest structure with given initial state. 
It will be passed * to test function as an argument later. It can be used when test state does * not need special initialization or was initialized already. * @note If the group setup function initialized the state already, it won't be * overridden by the initial state defined here. */ #define cmocka_unit_test_prestate(f, state) { #f, f, NULL, NULL, state } /** * Initialize a CMUnitTest structure with given initial state, setup and * teardown function. Any of these values can be NULL. Initial state is passed * later to setup function, or directly to test if none was given. * @note If the group setup function initialized the state already, it won't be * overridden by the initial state defined here. */ #define cmocka_unit_test_prestate_setup_teardown(f, setup, teardown, state) { #f, f, setup, teardown, state } #define run_tests(tests) _run_tests(tests, sizeof(tests) / sizeof((tests)[0])) #define run_group_tests(tests) _run_group_tests(tests, sizeof(tests) / sizeof((tests)[0])) #ifdef DOXYGEN /** * @brief Run tests specified by an array of CMUnitTest structures. * * @param[in] group_tests[] The array of unit tests to execute. * * @param[in] group_setup The setup function which should be called before * all unit tests are executed. * * @param[in] group_teardown The teardown function to be called after all * tests have finished. * * @return 0 on success, or the number of failed tests. * * @code * static int setup(void **state) { * int *answer = malloc(sizeof(int)); * if (*answer == NULL) { * return -1; * } * *answer = 42; * * *state = answer; * * return 0; * } * * static int teardown(void **state) { * free(*state); * * return 0; * } * * static void null_test_success(void **state) { * (void) state; * } * * static void int_test_success(void **state) { * int *answer = *state; * assert_int_equal(*answer, 42); * } * * int main(void) { * const struct CMUnitTest tests[] = { * cmocka_unit_test(null_test_success), * cmocka_unit_test_setup_teardown(int_test_success, setup, teardown), * }; * * return cmocka_run_group_tests(tests, NULL, NULL); * } * @endcode * * @see cmocka_unit_test * @see cmocka_unit_test_setup * @see cmocka_unit_test_teardown * @see cmocka_unit_test_setup_teardown */ int cmocka_run_group_tests(const struct CMUnitTest group_tests[], CMFixtureFunction group_setup, CMFixtureFunction group_teardown); #else # define cmocka_run_group_tests(group_tests, group_setup, group_teardown) \ _cmocka_run_group_tests(#group_tests, group_tests, sizeof(group_tests) / sizeof((group_tests)[0]), group_setup, group_teardown) #endif #ifdef DOXYGEN /** * @brief Run tests specified by an array of CMUnitTest structures and specify * a name. * * @param[in] group_name The name of the group test. * * @param[in] group_tests[] The array of unit tests to execute. * * @param[in] group_setup The setup function which should be called before * all unit tests are executed. * * @param[in] group_teardown The teardown function to be called after all * tests have finished. * * @return 0 on success, or the number of failed tests. 
* * @code * static int setup(void **state) { * int *answer = malloc(sizeof(int)); * if (*answer == NULL) { * return -1; * } * *answer = 42; * * *state = answer; * * return 0; * } * * static int teardown(void **state) { * free(*state); * * return 0; * } * * static void null_test_success(void **state) { * (void) state; * } * * static void int_test_success(void **state) { * int *answer = *state; * assert_int_equal(*answer, 42); * } * * int main(void) { * const struct CMUnitTest tests[] = { * cmocka_unit_test(null_test_success), * cmocka_unit_test_setup_teardown(int_test_success, setup, teardown), * }; * * return cmocka_run_group_tests_name("success_test", tests, NULL, NULL); * } * @endcode * * @see cmocka_unit_test * @see cmocka_unit_test_setup * @see cmocka_unit_test_teardown * @see cmocka_unit_test_setup_teardown */ int cmocka_run_group_tests_name(const char *group_name, const struct CMUnitTest group_tests[], CMFixtureFunction group_setup, CMFixtureFunction group_teardown); #else # define cmocka_run_group_tests_name(group_name, group_tests, group_setup, group_teardown) \ _cmocka_run_group_tests(group_name, group_tests, sizeof(group_tests) / sizeof((group_tests)[0]), group_setup, group_teardown) #endif /** @} */ /** * @defgroup cmocka_alloc Dynamic Memory Allocation * @ingroup cmocka * * Memory leaks, buffer overflows and underflows can be checked using cmocka. * * To test for memory leaks, buffer overflows and underflows a module being * tested by cmocka should replace calls to malloc(), calloc() and free() to * test_malloc(), test_calloc() and test_free() respectively. Each time a block * is deallocated using test_free() it is checked for corruption, if a corrupt * block is found a test failure is signalled. All blocks allocated using the * test_*() allocation functions are tracked by the cmocka library. When a test * completes if any allocated blocks (memory leaks) remain they are reported * and a test failure is signalled. * * For simplicity cmocka currently executes all tests in one process. Therefore * all test cases in a test application share a single address space which * means memory corruption from a single test case could potentially cause the * test application to exit prematurely. * * @{ */ #ifdef DOXYGEN /** * @brief Test function overriding malloc. * * @param[in] size The bytes which should be allocated. * * @return A pointer to the allocated memory or NULL on error. * * @code * #ifdef UNIT_TESTING * extern void* _test_malloc(const size_t size, const char* file, const int line); * * #define malloc(size) _test_malloc(size, __FILE__, __LINE__) * #endif * * void leak_memory() { * int * const temporary = (int*)malloc(sizeof(int)); * *temporary = 0; * } * @endcode * * @see malloc(3) */ void *test_malloc(size_t size); #else #define test_malloc(size) _test_malloc(size, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Test function overriding calloc. * * The memory is set to zero. * * @param[in] nmemb The number of elements for an array to be allocated. * * @param[in] size The size in bytes of each array element to allocate. * * @return A pointer to the allocated memory, NULL on error. * * @see calloc(3) */ void *test_calloc(size_t nmemb, size_t size); #else #define test_calloc(num, size) _test_calloc(num, size, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Test function overriding realloc which detects buffer overruns * and memoery leaks. * * @param[in] ptr The memory block which should be changed. * * @param[in] size The bytes which should be allocated. 
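 *
 * A sketch of how a module under test might redirect realloc(), mirroring
 * the test_malloc() example above (grow_buffer() is assumed for this
 * example):
 *
 * @code
 * #ifdef UNIT_TESTING
 * extern void* _test_realloc(void *ptr, const size_t size, const char* file, const int line);
 *
 * #define realloc(ptr, size) _test_realloc(ptr, size, __FILE__, __LINE__)
 * #endif
 *
 * char *grow_buffer(char *buf, size_t new_size)
 * {
 *      return (char*)realloc(buf, new_size);
 * }
 * @endcode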
* * @return The newly allocated memory block, NULL on error. */ void *test_realloc(void *ptr, size_t size); #else #define test_realloc(ptr, size) _test_realloc(ptr, size, __FILE__, __LINE__) #endif #ifdef DOXYGEN /** * @brief Test function overriding free(3). * * @param[in] ptr The pointer to the memory space to free. * * @see free(3). */ void test_free(void *ptr); #else #define test_free(ptr) _test_free(ptr, __FILE__, __LINE__) #endif /* Redirect malloc, calloc and free to the unit test allocators. */ #ifdef UNIT_TESTING #define malloc test_malloc #define realloc test_realloc #define calloc test_calloc #define free test_free #endif /* UNIT_TESTING */ /** @} */ /** * @defgroup cmocka_mock_assert Standard Assertions * @ingroup cmocka * * How to handle assert(3) of the standard C library. * * Runtime assert macros like the standard C library's assert() should be * redefined in modules being tested to use cmocka's mock_assert() function. * Normally mock_assert() signals a test failure. If a function is called using * the expect_assert_failure() macro, any calls to mock_assert() within the * function will result in the execution of the test. If no calls to * mock_assert() occur during the function called via expect_assert_failure() a * test failure is signalled. * * @{ */ /** * @brief Function to replace assert(3) in tested code. * * In conjuction with check_assert() it's possible to determine whether an * assert condition has failed without stopping a test. * * @param[in] result The expression to assert. * * @param[in] expression The expression as string. * * @param[in] file The file mock_assert() is called. * * @param[in] line The line mock_assert() is called. * * @code * #ifdef UNIT_TESTING * extern void mock_assert(const int result, const char* const expression, * const char * const file, const int line); * * #undef assert * #define assert(expression) \ * mock_assert((int)(expression), #expression, __FILE__, __LINE__); * #endif * * void increment_value(int * const value) { * assert(value); * (*value) ++; * } * @endcode * * @see assert(3) * @see expect_assert_failure */ void mock_assert(const int result, const char* const expression, const char * const file, const int line); #ifdef DOXYGEN /** * @brief Ensure that mock_assert() is called. * * If mock_assert() is called the assert expression string is returned. * * @param[in] fn_call The function will will call mock_assert(). * * @code * #define assert mock_assert * * void showmessage(const char *message) { * assert(message); * } * * int main(int argc, const char* argv[]) { * expect_assert_failure(show_message(NULL)); * printf("succeeded\n"); * return 0; * } * @endcode * */ void expect_assert_failure(function fn_call); #else #define expect_assert_failure(function_call) \ { \ const int result = setjmp(global_expect_assert_env); \ global_expecting_assert = 1; \ if (result) { \ print_message("Expected assertion %s occurred\n", \ global_last_failed_assert); \ global_expecting_assert = 0; \ } else { \ function_call ; \ global_expecting_assert = 0; \ print_error("Expected assert in %s\n", #function_call); \ _fail(__FILE__, __LINE__); \ } \ } #endif /** @} */ /* Function prototype for setup, test and teardown functions. */ typedef void (*UnitTestFunction)(void **state); /* Function that determines whether a function parameter value is correct. */ typedef int (*CheckParameterValue)(const LargestIntegralType value, const LargestIntegralType check_value_data); /* Type of the unit test function. 
*/ typedef enum UnitTestFunctionType { UNIT_TEST_FUNCTION_TYPE_TEST = 0, UNIT_TEST_FUNCTION_TYPE_SETUP, UNIT_TEST_FUNCTION_TYPE_TEARDOWN, UNIT_TEST_FUNCTION_TYPE_GROUP_SETUP, UNIT_TEST_FUNCTION_TYPE_GROUP_TEARDOWN, } UnitTestFunctionType; /* * Stores a unit test function with its name and type. * NOTE: Every setup function must be paired with a teardown function. It's * possible to specify NULL function pointers. */ typedef struct UnitTest { const char* name; UnitTestFunction function; UnitTestFunctionType function_type; } UnitTest; typedef struct GroupTest { UnitTestFunction setup; UnitTestFunction teardown; const UnitTest *tests; const size_t number_of_tests; } GroupTest; /* Function prototype for test functions. */ typedef void (*CMUnitTestFunction)(void **state); /* Function prototype for setup and teardown functions. */ typedef int (*CMFixtureFunction)(void **state); struct CMUnitTest { const char *name; CMUnitTestFunction test_func; CMFixtureFunction setup_func; CMFixtureFunction teardown_func; void *initial_state; }; /* Location within some source code. */ typedef struct SourceLocation { const char* file; int line; } SourceLocation; /* Event that's called to check a parameter value. */ typedef struct CheckParameterEvent { SourceLocation location; const char *parameter_name; CheckParameterValue check_value; LargestIntegralType check_value_data; } CheckParameterEvent; /* Used by expect_assert_failure() and mock_assert(). */ extern int global_expecting_assert; extern jmp_buf global_expect_assert_env; extern const char * global_last_failed_assert; /* Retrieves a value for the given function, as set by "will_return". */ LargestIntegralType _mock(const char * const function, const char* const file, const int line); void _expect_function_call( const char * const function_name, const char * const file, const int line, const int count); void _function_called(const char * const function, const char* const file, const int line); void _expect_check( const char* const function, const char* const parameter, const char* const file, const int line, const CheckParameterValue check_function, const LargestIntegralType check_data, CheckParameterEvent * const event, const int count); void _expect_in_set( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType values[], const size_t number_of_values, const int count); void _expect_not_in_set( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType values[], const size_t number_of_values, const int count); void _expect_in_range( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType minimum, const LargestIntegralType maximum, const int count); void _expect_not_in_range( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType minimum, const LargestIntegralType maximum, const int count); void _expect_value( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType value, const int count); void _expect_not_value( const char* const function, const char* const parameter, const char* const file, const int line, const LargestIntegralType value, const int count); void _expect_string( const char* const function, const char* const parameter, const char* const file, const int line, const char* string, const int count); void 
_expect_not_string( const char* const function, const char* const parameter, const char* const file, const int line, const char* string, const int count); void _expect_memory( const char* const function, const char* const parameter, const char* const file, const int line, const void* const memory, const size_t size, const int count); void _expect_not_memory( const char* const function, const char* const parameter, const char* const file, const int line, const void* const memory, const size_t size, const int count); void _expect_any( const char* const function, const char* const parameter, const char* const file, const int line, const int count); void _check_expected( const char * const function_name, const char * const parameter_name, const char* file, const int line, const LargestIntegralType value); void _will_return(const char * const function_name, const char * const file, const int line, const LargestIntegralType value, const int count); void _assert_true(const LargestIntegralType result, const char* const expression, const char * const file, const int line); void _assert_return_code(const LargestIntegralType result, size_t rlen, const LargestIntegralType error, const char * const expression, const char * const file, const int line); void _assert_int_equal( const LargestIntegralType a, const LargestIntegralType b, const char * const file, const int line); void _assert_int_not_equal( const LargestIntegralType a, const LargestIntegralType b, const char * const file, const int line); void _assert_string_equal(const char * const a, const char * const b, const char * const file, const int line); void _assert_string_not_equal(const char * const a, const char * const b, const char *file, const int line); void _assert_memory_equal(const void * const a, const void * const b, const size_t size, const char* const file, const int line); void _assert_memory_not_equal(const void * const a, const void * const b, const size_t size, const char* const file, const int line); void _assert_in_range( const LargestIntegralType value, const LargestIntegralType minimum, const LargestIntegralType maximum, const char* const file, const int line); void _assert_not_in_range( const LargestIntegralType value, const LargestIntegralType minimum, const LargestIntegralType maximum, const char* const file, const int line); void _assert_in_set( const LargestIntegralType value, const LargestIntegralType values[], const size_t number_of_values, const char* const file, const int line); void _assert_not_in_set( const LargestIntegralType value, const LargestIntegralType values[], const size_t number_of_values, const char* const file, const int line); void* _test_malloc(const size_t size, const char* file, const int line); void* _test_realloc(void *ptr, const size_t size, const char* file, const int line); void* _test_calloc(const size_t number_of_elements, const size_t size, const char* file, const int line); void _test_free(void* const ptr, const char* file, const int line); void _fail(const char * const file, const int line); void _skip(const char * const file, const int line); int _run_test( const char * const function_name, const UnitTestFunction Function, void ** const volatile state, const UnitTestFunctionType function_type, const void* const heap_check_point); CMOCKA_DEPRECATED int _run_tests(const UnitTest * const tests, const size_t number_of_tests); CMOCKA_DEPRECATED int _run_group_tests(const UnitTest * const tests, const size_t number_of_tests); /* Test runner */ int _cmocka_run_group_tests(const char *group_name, 
const struct CMUnitTest * const tests, const size_t num_tests, CMFixtureFunction group_setup, CMFixtureFunction group_teardown); /* Standard output and error print methods. */ void print_message(const char* const format, ...) CMOCKA_PRINTF_ATTRIBUTE(1, 2); void print_error(const char* const format, ...) CMOCKA_PRINTF_ATTRIBUTE(1, 2); void vprint_message(const char* const format, va_list args) CMOCKA_PRINTF_ATTRIBUTE(1, 0); void vprint_error(const char* const format, va_list args) CMOCKA_PRINTF_ATTRIBUTE(1, 0); enum cm_message_output { CM_OUTPUT_STDOUT, CM_OUTPUT_SUBUNIT, CM_OUTPUT_TAP, CM_OUTPUT_XML, }; /** * @brief Function to set the output format for a test. * * The output format for the test can either be set globally using this * function or overridden with the environment variable CMOCKA_MESSAGE_OUTPUT. * * The environment variable can be set to either STDOUT, SUBUNIT, TAP or XML. * * @param[in] output The output format to use for the test. * */ void cmocka_set_message_output(enum cm_message_output output); /** * @brief Set a pattern to only run the tests matching the pattern. * * This allows filtering tests and only running the ones matching the pattern. The * pattern can include two wildcards. The first is '*', a wildcard that matches * zero or more characters, or '?', a wildcard that matches exactly one * character. * * @param[in] pattern The pattern to match, e.g. "test_wurst*" */ void cmocka_set_test_filter(const char *pattern); /** @} */ #endif /* CMOCKA_H_ */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0092094 tevent-0.11.0/third_party/cmocka/cmocka_private.h0000660000000000000000000000760600000000000021772 0ustar00rootroot00000000000000/* * Copyright 2008 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CMOCKA_PRIVATE_H_ #define CMOCKA_PRIVATE_H_ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdint.h> #ifdef _WIN32 #include <windows.h> # ifdef _MSC_VER # include <stdio.h> /* _snprintf */ # undef inline # define inline __inline # ifndef va_copy # define va_copy(dest, src) (dest = src) # endif # define strcasecmp _stricmp # define strncasecmp _strnicmp # if defined(HAVE__SNPRINTF_S) # undef snprintf # define snprintf(d, n, ...)
_snprintf_s((d), (n), _TRUNCATE, __VA_ARGS__) # else /* HAVE__SNPRINTF_S */ # if defined(HAVE__SNPRINTF) # undef snprintf # define snprintf _snprintf # else /* HAVE__SNPRINTF */ # if !defined(HAVE_SNPRINTF) # error "no snprintf compatible function found" # endif /* HAVE_SNPRINTF */ # endif /* HAVE__SNPRINTF */ # endif /* HAVE__SNPRINTF_S */ # if defined(HAVE__VSNPRINTF_S) # undef vsnprintf # define vsnprintf(s, n, f, v) _vsnprintf_s((s), (n), _TRUNCATE, (f), (v)) # else /* HAVE__VSNPRINTF_S */ # if defined(HAVE__VSNPRINTF) # undef vsnprintf # define vsnprintf _vsnprintf # else # if !defined(HAVE_VSNPRINTF) # error "No vsnprintf compatible function found" # endif /* HAVE_VSNPRINTF */ # endif /* HAVE__VSNPRINTF */ # endif /* HAVE__VSNPRINTF_S */ # endif /* _MSC_VER */ /* * Backwards compatibility with headers shipped with Visual Studio 2005 and * earlier. */ WINBASEAPI BOOL WINAPI IsDebuggerPresent(VOID); #ifndef PRIdS # define PRIdS "Id" #endif #ifndef PRIu64 # define PRIu64 "I64u" #endif #ifndef PRIuMAX # define PRIuMAX PRIu64 #endif #ifndef PRIxMAX #define PRIxMAX "I64x" #endif #ifndef PRIXMAX #define PRIXMAX "I64X" #endif #else /* _WIN32 */ #ifndef __PRI64_PREFIX # if __WORDSIZE == 64 # define __PRI64_PREFIX "l" # else # define __PRI64_PREFIX "ll" # endif #endif #ifndef PRIdS # define PRIdS "zd" #endif #ifndef PRIu64 # define PRIu64 __PRI64_PREFIX "u" #endif #ifndef PRIuMAX # define PRIuMAX __PRI64_PREFIX "u" #endif #ifndef PRIxMAX #define PRIxMAX __PRI64_PREFIX "x" #endif #ifndef PRIXMAX #define PRIXMAX __PRI64_PREFIX "X" #endif #endif /* _WIN32 */ /** Free memory space */ #define SAFE_FREE(x) do { if ((x) != NULL) {free(x); x=NULL;} } while(0) /** Zero a structure */ #define ZERO_STRUCT(x) memset((char *)&(x), 0, sizeof(x)) /** Zero a structure given a pointer to the structure */ #define ZERO_STRUCTP(x) do { if ((x) != NULL) memset((char *)(x), 0, sizeof(*(x))); } while(0) /** Get the size of an array */ #define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) /** Overwrite the complete string with 'X' */ #define BURN_STRING(x) do { if ((x) != NULL) memset((x), 'X', strlen((x))); } while(0) /** * This is a hack to fix warnings. The idea is to use this everywhere that we * get the "discarding const" warning by the compiler. That doesn't actually * fix the real issue, but marks the place and you can search the code for * discard_const. * * Please use this macro only when there is no other way to fix the warning. * We should use this function in only in a very few places. * * Also, please call this via the discard_const_p() macro interface, as that * makes the return type safe. 
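 *
 * A usage sketch (the variable names are only illustrative and not part of
 * this header):
 *
 *     const char *ro = "read-only string";
 *     char *rw = discard_const_p(char, ro);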
*/ #define discard_const(ptr) ((void *)((uintptr_t)(ptr))) /** * Type-safe version of discard_const */ #define discard_const_p(type, ptr) ((type *)discard_const(ptr)) #endif /* CMOCKA_PRIVATE_H_ */ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2956033 tevent-0.11.0/third_party/cmocka/wscript0000660000000000000000000000124700000000000020243 0ustar00rootroot00000000000000#!/usr/bin/env python from waflib import Options def configure(conf): conf.CHECK_FUNCS('strsignal') conf.CHECK_FUNCS('longjmp siglongjmp') if conf.CHECK_CMOCKA(): conf.define('USING_SYSTEM_CMOCKA', 1) def build(bld): if bld.CONFIG_SET('USING_SYSTEM_CMOCKA'): return extra_libs='' # Link to librt if needed for clock_gettime() if bld.CONFIG_SET('HAVE_LIBRT'): extra_libs += ' rt' bld.SAMBA_LIBRARY('cmocka', source='cmocka.c', cflags='-DHAVE_CONFIG_H=1', deps=extra_libs, allow_warnings=True, private_library=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2956033 tevent-0.11.0/third_party/waf/waflib/Build.py0000660000000000000000000012477000000000000021031 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Classes related to the build phase (build, clean, install, step, etc) The inheritance tree is the following: """ import os, sys, errno, re, shutil, stat try: import cPickle except ImportError: import pickle as cPickle from waflib import Node, Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors CACHE_DIR = 'c4che' """Name of the cache directory""" CACHE_SUFFIX = '_cache.py' """ConfigSet cache files for variants are written under :py:attr:´waflib.Build.CACHE_DIR´ in the form ´variant_name´_cache.py""" INSTALL = 1337 """Positive value '->' install, see :py:attr:`waflib.Build.BuildContext.is_install`""" UNINSTALL = -1337 """Negative value '<-' uninstall, see :py:attr:`waflib.Build.BuildContext.is_install`""" SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() """Build class members to save between the runs; these should be all dicts except for `root` which represents a :py:class:`waflib.Node.Node` instance """ CFG_FILES = 'cfg_files' """Files from the build directory to hash before starting the build (``config.h`` written during the configuration)""" POST_AT_ONCE = 0 """Post mode: all task generators are posted before any task executed""" POST_LAZY = 1 """Post mode: post the task generators group after group, the tasks in the next group are created when the tasks in the previous groups are done""" PROTOCOL = -1 if sys.platform == 'cli': PROTOCOL = 0 class BuildContext(Context.Context): '''executes the build''' cmd = 'build' variant = '' def __init__(self, **kw): super(BuildContext, self).__init__(**kw) self.is_install = 0 """Non-zero value when installing or uninstalling file""" self.top_dir = kw.get('top_dir', Context.top_dir) """See :py:attr:`waflib.Context.top_dir`; prefer :py:attr:`waflib.Build.BuildContext.srcnode`""" self.out_dir = kw.get('out_dir', Context.out_dir) """See :py:attr:`waflib.Context.out_dir`; prefer :py:attr:`waflib.Build.BuildContext.bldnode`""" self.run_dir = kw.get('run_dir', Context.run_dir) """See :py:attr:`waflib.Context.run_dir`""" self.launch_dir = Context.launch_dir """See :py:attr:`waflib.Context.out_dir`; prefer :py:meth:`waflib.Build.BuildContext.launch_node`""" self.post_mode = POST_LAZY """Whether to post the task generators at once or group-by-group (default is 
group-by-group)""" self.cache_dir = kw.get('cache_dir') if not self.cache_dir: self.cache_dir = os.path.join(self.out_dir, CACHE_DIR) self.all_envs = {} """Map names to :py:class:`waflib.ConfigSet.ConfigSet`, the empty string must map to the default environment""" # ======================================= # # cache variables self.node_sigs = {} """Dict mapping build nodes to task identifier (uid), it indicates whether a task created a particular file (persists across builds)""" self.task_sigs = {} """Dict mapping task identifiers (uid) to task signatures (persists across builds)""" self.imp_sigs = {} """Dict mapping task identifiers (uid) to implicit task dependencies used for scanning targets (persists across builds)""" self.node_deps = {} """Dict mapping task identifiers (uid) to node dependencies found by :py:meth:`waflib.Task.Task.scan` (persists across builds)""" self.raw_deps = {} """Dict mapping task identifiers (uid) to custom data returned by :py:meth:`waflib.Task.Task.scan` (persists across builds)""" self.task_gen_cache_names = {} self.jobs = Options.options.jobs """Amount of jobs to run in parallel""" self.targets = Options.options.targets """List of targets to build (default: \\*)""" self.keep = Options.options.keep """Whether the build should continue past errors""" self.progress_bar = Options.options.progress_bar """ Level of progress status: 0. normal output 1. progress bar 2. IDE output 3. No output at all """ # Manual dependencies. self.deps_man = Utils.defaultdict(list) """Manual dependencies set by :py:meth:`waflib.Build.BuildContext.add_manual_dependency`""" # just the structure here self.current_group = 0 """ Current build group """ self.groups = [] """ List containing lists of task generators """ self.group_names = {} """ Map group names to the group lists. See :py:meth:`waflib.Build.BuildContext.add_group` """ for v in SAVED_ATTRS: if not hasattr(self, v): setattr(self, v, {}) def get_variant_dir(self): """Getter for the variant_dir attribute""" if not self.variant: return self.out_dir return os.path.join(self.out_dir, os.path.normpath(self.variant)) variant_dir = property(get_variant_dir, None) def __call__(self, *k, **kw): """ Create a task generator and add it to the current build group. The following forms are equivalent:: def build(bld): tg = bld(a=1, b=2) def build(bld): tg = bld() tg.a = 1 tg.b = 2 def build(bld): tg = TaskGen.task_gen(a=1, b=2) bld.add_to_group(tg, None) :param group: group name to add the task generator to :type group: string """ kw['bld'] = self ret = TaskGen.task_gen(*k, **kw) self.task_gen_cache_names = {} # reset the cache, each time self.add_to_group(ret, group=kw.get('group')) return ret def __copy__(self): """ Build contexts cannot be copied :raises: :py:class:`waflib.Errors.WafError` """ raise Errors.WafError('build contexts cannot be copied') def load_envs(self): """ The configuration command creates files of the form ``build/c4che/NAMEcache.py``. This method creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those files and stores them in :py:attr:`waflib.Build.BuildContext.allenvs`. 
""" node = self.root.find_node(self.cache_dir) if not node: raise Errors.WafError('The project was not configured: run "waf configure" first!') lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True) if not lst: raise Errors.WafError('The cache directory is empty: reconfigure the project') for x in lst: name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/') env = ConfigSet.ConfigSet(x.abspath()) self.all_envs[name] = env for f in env[CFG_FILES]: newnode = self.root.find_resource(f) if not newnode or not newnode.exists(): raise Errors.WafError('Missing configuration file %r, reconfigure the project!' % f) def init_dirs(self): """ Initialize the project directory and the build directory by creating the nodes :py:attr:`waflib.Build.BuildContext.srcnode` and :py:attr:`waflib.Build.BuildContext.bldnode` corresponding to ``top_dir`` and ``variant_dir`` respectively. The ``bldnode`` directory is created if necessary. """ if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') self.path = self.srcnode = self.root.find_dir(self.top_dir) self.bldnode = self.root.make_node(self.variant_dir) self.bldnode.mkdir() def execute(self): """ Restore data from previous builds and call :py:meth:`waflib.Build.BuildContext.execute_build`. Overrides from :py:func:`waflib.Context.Context.execute` """ self.restore() if not self.all_envs: self.load_envs() self.execute_build() def execute_build(self): """ Execute the build by: * reading the scripts (see :py:meth:`waflib.Context.Context.recurse`) * calling :py:meth:`waflib.Build.BuildContext.pre_build` to call user build functions * calling :py:meth:`waflib.Build.BuildContext.compile` to process the tasks * calling :py:meth:`waflib.Build.BuildContext.post_build` to call user build functions """ Logs.info("Waf: Entering directory `%s'", self.variant_dir) self.recurse([self.run_dir]) self.pre_build() # display the time elapsed in the progress bar self.timer = Utils.Timer() try: self.compile() finally: if self.progress_bar == 1 and sys.stderr.isatty(): c = self.producer.processed or 1 m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL) Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on}) Logs.info("Waf: Leaving directory `%s'", self.variant_dir) try: self.producer.bld = None del self.producer except AttributeError: pass self.post_build() def restore(self): """ Load data from a previous run, sets the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS` """ try: env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py')) except EnvironmentError: pass else: if env.version < Context.HEXVERSION: raise Errors.WafError('Project was configured with a different version of Waf, please reconfigure it') for t in env.tools: self.setup(**t) dbfn = os.path.join(self.variant_dir, Context.DBFILE) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): # handle missing file/empty file Logs.debug('build: Could not load the build cache %s (missing)', dbfn) else: try: Node.pickle_lock.acquire() Node.Nod3 = self.node_class try: data = cPickle.loads(data) except Exception as e: Logs.debug('build: Could not pickle the build cache %s: %r', dbfn, e) else: for x in SAVED_ATTRS: setattr(self, x, data.get(x, {})) finally: Node.pickle_lock.release() self.init_dirs() def store(self): """ Store data for next runs, set the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`. 
Uses a temporary file to avoid problems on ctrl+c. """ data = {} for x in SAVED_ATTRS: data[x] = getattr(self, x) db = os.path.join(self.variant_dir, Context.DBFILE) try: Node.pickle_lock.acquire() Node.Nod3 = self.node_class x = cPickle.dumps(data, PROTOCOL) finally: Node.pickle_lock.release() Utils.writef(db + '.tmp', x, m='wb') try: st = os.stat(db) os.remove(db) if not Utils.is_win32: # win32 has no chown but we're paranoid os.chown(db + '.tmp', st.st_uid, st.st_gid) except (AttributeError, OSError): pass # do not use shutil.move (copy is not thread-safe) os.rename(db + '.tmp', db) def compile(self): """ Run the build by creating an instance of :py:class:`waflib.Runner.Parallel` The cache file is written when at least a task was executed. :raises: :py:class:`waflib.Errors.BuildError` in case the build fails """ Logs.debug('build: compile()') # delegate the producer-consumer logic to another object to reduce the complexity self.producer = Runner.Parallel(self, self.jobs) self.producer.biter = self.get_build_iterator() try: self.producer.start() except KeyboardInterrupt: if self.is_dirty(): self.store() raise else: if self.is_dirty(): self.store() if self.producer.error: raise Errors.BuildError(self.producer.error) def is_dirty(self): return self.producer.dirty def setup(self, tool, tooldir=None, funs=None): """ Import waf tools defined during the configuration:: def configure(conf): conf.load('glib2') def build(bld): pass # glib2 is imported implicitly :param tool: tool list :type tool: list :param tooldir: optional tool directory (sys.path) :type tooldir: list of string :param funs: unused variable """ if isinstance(tool, list): for i in tool: self.setup(i, tooldir) return module = Context.load_tool(tool, tooldir) if hasattr(module, "setup"): module.setup(self) def get_env(self): """Getter for the env property""" try: return self.all_envs[self.variant] except KeyError: return self.all_envs[''] def set_env(self, val): """Setter for the env property""" self.all_envs[self.variant] = val env = property(get_env, set_env) def add_manual_dependency(self, path, value): """ Adds a dependency from a node object to a value:: def build(bld): bld.add_manual_dependency( bld.path.find_resource('wscript'), bld.root.find_resource('/etc/fstab')) :param path: file path :type path: string or :py:class:`waflib.Node.Node` :param value: value to depend :type value: :py:class:`waflib.Node.Node`, byte object, or function returning a byte object """ if not path: raise ValueError('Invalid input path %r' % path) if isinstance(path, Node.Node): node = path elif os.path.isabs(path): node = self.root.find_resource(path) else: node = self.path.find_resource(path) if not node: raise ValueError('Could not find the path %r' % path) if isinstance(value, list): self.deps_man[node].extend(value) else: self.deps_man[node].append(value) def launch_node(self): """Returns the launch directory as a :py:class:`waflib.Node.Node` object (cached)""" try: # private cache return self.p_ln except AttributeError: self.p_ln = self.root.find_dir(self.launch_dir) return self.p_ln def hash_env_vars(self, env, vars_lst): """ Hashes configuration set variables:: def build(bld): bld.hash_env_vars(bld.env, ['CXX', 'CC']) This method uses an internal cache. 
:param env: Configuration Set :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param vars_lst: list of variables :type vars_list: list of string """ if not env.table: env = env.parent if not env: return Utils.SIG_NIL idx = str(id(env)) + str(vars_lst) try: cache = self.cache_env except AttributeError: cache = self.cache_env = {} else: try: return self.cache_env[idx] except KeyError: pass lst = [env[a] for a in vars_lst] cache[idx] = ret = Utils.h_list(lst) Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst) return ret def get_tgen_by_name(self, name): """ Fetches a task generator by its name or its target attribute; the name must be unique in a build:: def build(bld): tg = bld(name='foo') tg == bld.get_tgen_by_name('foo') This method use a private internal cache. :param name: Task generator name :raises: :py:class:`waflib.Errors.WafError` in case there is no task genenerator by that name """ cache = self.task_gen_cache_names if not cache: # create the index lazily for g in self.groups: for tg in g: try: cache[tg.name] = tg except AttributeError: # raised if not a task generator, which should be uncommon pass try: return cache[name] except KeyError: raise Errors.WafError('Could not find a task generator for the name %r' % name) def progress_line(self, idx, total, col1, col2): """ Computes a progress bar line displayed when running ``waf -p`` :returns: progress bar line :rtype: string """ if not sys.stderr.isatty(): return '' n = len(str(total)) Utils.rot_idx += 1 ind = Utils.rot_chr[Utils.rot_idx % 4] pc = (100. * idx)/total fs = "[%%%dd/%%d][%%s%%2d%%%%%%s][%s][" % (n, ind) left = fs % (idx, total, col1, pc, col2) right = '][%s%s%s]' % (col1, self.timer, col2) cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2) if cols < 7: cols = 7 ratio = ((cols * idx)//total) - 1 bar = ('='*ratio+'>').ljust(cols) msg = Logs.indicator % (left, bar, right) return msg def declare_chain(self, *k, **kw): """ Wraps :py:func:`waflib.TaskGen.declare_chain` for convenience """ return TaskGen.declare_chain(*k, **kw) def pre_build(self): """Executes user-defined methods before the build starts, see :py:meth:`waflib.Build.BuildContext.add_pre_fun`""" for m in getattr(self, 'pre_funs', []): m(self) def post_build(self): """Executes user-defined methods after the build is successful, see :py:meth:`waflib.Build.BuildContext.add_post_fun`""" for m in getattr(self, 'post_funs', []): m(self) def add_pre_fun(self, meth): """ Binds a callback method to execute after the scripts are read and before the build starts:: def mycallback(bld): print("Hello, world!") def build(bld): bld.add_pre_fun(mycallback) """ try: self.pre_funs.append(meth) except AttributeError: self.pre_funs = [meth] def add_post_fun(self, meth): """ Binds a callback method to execute immediately after the build is successful:: def call_ldconfig(bld): bld.exec_command('/sbin/ldconfig') def build(bld): if bld.cmd == 'install': bld.add_pre_fun(call_ldconfig) """ try: self.post_funs.append(meth) except AttributeError: self.post_funs = [meth] def get_group(self, x): """ Returns the build group named `x`, or the current group if `x` is None :param x: name or number or None :type x: string, int or None """ if not self.groups: self.add_group() if x is None: return self.groups[self.current_group] if x in self.group_names: return self.group_names[x] return self.groups[x] def add_to_group(self, tgen, group=None): """Adds a task or a task generator to the build; there is no attempt to remove it if it was already added.""" 
assert(isinstance(tgen, TaskGen.task_gen) or isinstance(tgen, Task.Task)) tgen.bld = self self.get_group(group).append(tgen) def get_group_name(self, g): """ Returns the name of the input build group :param g: build group object or build group index :type g: integer or list :return: name :rtype: string """ if not isinstance(g, list): g = self.groups[g] for x in self.group_names: if id(self.group_names[x]) == id(g): return x return '' def get_group_idx(self, tg): """ Returns the index of the group containing the task generator given as argument:: def build(bld): tg = bld(name='nada') 0 == bld.get_group_idx(tg) :param tg: Task generator object :type tg: :py:class:`waflib.TaskGen.task_gen` :rtype: int """ se = id(tg) for i, tmp in enumerate(self.groups): for t in tmp: if id(t) == se: return i return None def add_group(self, name=None, move=True): """ Adds a new group of tasks/task generators. By default the new group becomes the default group for new task generators (make sure to create build groups in order). :param name: name for this group :type name: string :param move: set this new group as default group (True by default) :type move: bool :raises: :py:class:`waflib.Errors.WafError` if a group by the name given already exists """ if name and name in self.group_names: raise Errors.WafError('add_group: name %s already present', name) g = [] self.group_names[name] = g self.groups.append(g) if move: self.current_group = len(self.groups) - 1 def set_group(self, idx): """ Sets the build group at position idx as current so that newly added task generators are added to this one by default:: def build(bld): bld(rule='touch ${TGT}', target='foo.txt') bld.add_group() # now the current group is 1 bld(rule='touch ${TGT}', target='bar.txt') bld.set_group(0) # now the current group is 0 bld(rule='touch ${TGT}', target='truc.txt') # build truc.txt before bar.txt :param idx: group name or group index :type idx: string or int """ if isinstance(idx, str): g = self.group_names[idx] for i, tmp in enumerate(self.groups): if id(g) == id(tmp): self.current_group = i break else: self.current_group = idx def total(self): """ Approximate task count: this value may be inaccurate if task generators are posted lazily (see :py:attr:`waflib.Build.BuildContext.post_mode`). The value :py:attr:`waflib.Runner.Parallel.total` is updated during the task execution. :rtype: int """ total = 0 for group in self.groups: for tg in group: try: total += len(tg.tasks) except AttributeError: total += 1 return total def get_targets(self): """ This method returns a pair containing the index of the last build group to post, and the list of task generator objects corresponding to the target names. This is used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` to perform partial builds:: $ waf --targets=myprogram,myshlib :return: the minimum build group index, and list of task generators :rtype: tuple """ to_post = [] min_grp = 0 for name in self.targets.split(','): tg = self.get_tgen_by_name(name) m = self.get_group_idx(tg) if m > min_grp: min_grp = m to_post = [tg] elif m == min_grp: to_post.append(tg) return (min_grp, to_post) def get_all_task_gen(self): """ Returns a list of all task generators for troubleshooting purposes. 
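		A minimal inspection sketch (the printed attribute is just an example)::

			def build(bld):
				for tg in bld.get_all_task_gen():
					print(getattr(tg, 'name', tg))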
""" lst = [] for g in self.groups: lst.extend(g) return lst def post_group(self): """ Post task generators from the group indexed by self.current_group; used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` """ def tgpost(tg): try: f = tg.post except AttributeError: pass else: f() if self.targets == '*': for tg in self.groups[self.current_group]: tgpost(tg) elif self.targets: if self.current_group < self._min_grp: for tg in self.groups[self.current_group]: tgpost(tg) else: for tg in self._exact_tg: tg.post() else: ln = self.launch_node() if ln.is_child_of(self.bldnode): if Logs.verbose > 1: Logs.warn('Building from the build directory, forcing --targets=*') ln = self.srcnode elif not ln.is_child_of(self.srcnode): if Logs.verbose > 1: Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)', ln.abspath(), self.srcnode.abspath()) ln = self.srcnode def is_post(tg, ln): try: p = tg.path except AttributeError: pass else: if p.is_child_of(ln): return True def is_post_group(): for i, g in enumerate(self.groups): if i > self.current_group: for tg in g: if is_post(tg, ln): return True if self.post_mode == POST_LAZY and ln != self.srcnode: # partial folder builds require all targets from a previous build group if is_post_group(): ln = self.srcnode for tg in self.groups[self.current_group]: if is_post(tg, ln): tgpost(tg) def get_tasks_group(self, idx): """ Returns all task instances for the build group at position idx, used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` :rtype: list of :py:class:`waflib.Task.Task` """ tasks = [] for tg in self.groups[idx]: try: tasks.extend(tg.tasks) except AttributeError: # not a task generator tasks.append(tg) return tasks def get_build_iterator(self): """ Creates a Python generator object that returns lists of tasks that may be processed in parallel. 
:return: tasks which can be executed immediately :rtype: generator returning lists of :py:class:`waflib.Task.Task` """ if self.targets and self.targets != '*': (self._min_grp, self._exact_tg) = self.get_targets() if self.post_mode != POST_LAZY: for self.current_group, _ in enumerate(self.groups): self.post_group() for self.current_group, _ in enumerate(self.groups): # first post the task generators for the group if self.post_mode != POST_AT_ONCE: self.post_group() # then extract the tasks tasks = self.get_tasks_group(self.current_group) # if the constraints are set properly (ext_in/ext_out, before/after) # the call to set_file_constraints may be removed (can be a 15% penalty on no-op rebuilds) # (but leave set_file_constraints for the installation step) # # if the tasks have only files, set_file_constraints is required but set_precedence_constraints is not necessary # Task.set_file_constraints(tasks) Task.set_precedence_constraints(tasks) self.cur_tasks = tasks if tasks: yield tasks while 1: # the build stops once there are no tasks to process yield [] def install_files(self, dest, files, **kw): """ Creates a task generator to install files on the system:: def build(bld): bld.install_files('${DATADIR}', self.path.find_resource('wscript')) :param dest: path representing the destination directory :type dest: :py:class:`waflib.Node.Node` or string (absolute path) :param files: input files :type files: list of strings or list of :py:class:`waflib.Node.Node` :param env: configuration set to expand *dest* :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param relative_trick: preserve the folder hierarchy when installing whole folders :type relative_trick: bool :param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node` :type cwd: :py:class:`waflib.Node.Node` :param postpone: execute the task immediately to perform the installation (False by default) :type postpone: bool """ assert(dest) tg = self(features='install_task', install_to=dest, install_from=files, **kw) tg.dest = tg.install_to tg.type = 'install_files' if not kw.get('postpone', True): tg.post() return tg def install_as(self, dest, srcfile, **kw): """ Creates a task generator to install a file on the system with a different name:: def build(bld): bld.install_as('${PREFIX}/bin', 'myapp', chmod=Utils.O755) :param dest: destination file :type dest: :py:class:`waflib.Node.Node` or string (absolute path) :param srcfile: input file :type srcfile: string or :py:class:`waflib.Node.Node` :param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node` :type cwd: :py:class:`waflib.Node.Node` :param env: configuration set for performing substitutions in dest :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param postpone: execute the task immediately to perform the installation (False by default) :type postpone: bool """ assert(dest) tg = self(features='install_task', install_to=dest, install_from=srcfile, **kw) tg.dest = tg.install_to tg.type = 'install_as' if not kw.get('postpone', True): tg.post() return tg def symlink_as(self, dest, src, **kw): """ Creates a task generator to install a symlink:: def build(bld): bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3') :param dest: absolute path of the symlink :type dest: :py:class:`waflib.Node.Node` or string (absolute path) :param src: link contents, which is a relative or absolute path which may exist or not :type src: string :param env: configuration set for performing substitutions in 
dest :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started :type add: bool :param postpone: execute the task immediately to perform the installation :type postpone: bool :param relative_trick: make the symlink relative (default: ``False``) :type relative_trick: bool """ assert(dest) tg = self(features='install_task', install_to=dest, install_from=src, **kw) tg.dest = tg.install_to tg.type = 'symlink_as' tg.link = src # TODO if add: self.add_to_group(tsk) if not kw.get('postpone', True): tg.post() return tg @TaskGen.feature('install_task') @TaskGen.before_method('process_rule', 'process_source') def process_install_task(self): """Creates the installation task for the current task generator; uses :py:func:`waflib.Build.add_install_task` internally.""" self.add_install_task(**self.__dict__) @TaskGen.taskgen_method def add_install_task(self, **kw): """ Creates the installation task for the current task generator, and executes it immediately if necessary :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ if not self.bld.is_install: return if not kw['install_to']: return if kw['type'] == 'symlink_as' and Utils.is_win32: if kw.get('win32_install'): kw['type'] = 'install_as' else: # just exit return tsk = self.install_task = self.create_task('inst') tsk.chmod = kw.get('chmod', Utils.O644) tsk.link = kw.get('link', '') or kw.get('install_from', '') tsk.relative_trick = kw.get('relative_trick', False) tsk.type = kw['type'] tsk.install_to = tsk.dest = kw['install_to'] tsk.install_from = kw['install_from'] tsk.relative_base = kw.get('cwd') or kw.get('relative_base', self.path) tsk.install_user = kw.get('install_user') tsk.install_group = kw.get('install_group') tsk.init_files() if not kw.get('postpone', True): tsk.run_now() return tsk @TaskGen.taskgen_method def add_install_files(self, **kw): """ Creates an installation task for files :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ kw['type'] = 'install_files' return self.add_install_task(**kw) @TaskGen.taskgen_method def add_install_as(self, **kw): """ Creates an installation task for a single file :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ kw['type'] = 'install_as' return self.add_install_task(**kw) @TaskGen.taskgen_method def add_symlink_as(self, **kw): """ Creates an installation task for a symbolic link :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ kw['type'] = 'symlink_as' return self.add_install_task(**kw) class inst(Task.Task): """Task that installs files or symlinks; it is typically executed by :py:class:`waflib.Build.InstallContext` and :py:class:`waflib.Build.UnInstallContext`""" def __str__(self): """Returns an empty string to disable the standard task display""" return '' def uid(self): """Returns a unique identifier for the task""" lst = self.inputs + self.outputs + [self.link, self.generator.path.abspath()] return Utils.h_list(lst) def init_files(self): """ Initializes the task input and output nodes """ if self.type == 'symlink_as': inputs = [] else: inputs = self.generator.to_nodes(self.install_from) if self.type == 'install_as': assert len(inputs) == 1 self.set_inputs(inputs) dest = self.get_install_path() outputs = [] if self.type == 'symlink_as': if self.relative_trick: self.link = os.path.relpath(self.link, os.path.dirname(dest)) outputs.append(self.generator.bld.root.make_node(dest)) elif self.type 
== 'install_as': outputs.append(self.generator.bld.root.make_node(dest)) else: for y in inputs: if self.relative_trick: destfile = os.path.join(dest, y.path_from(self.relative_base)) else: destfile = os.path.join(dest, y.name) outputs.append(self.generator.bld.root.make_node(destfile)) self.set_outputs(outputs) def runnable_status(self): """ Installation tasks are always executed, so this method returns either :py:const:`waflib.Task.ASK_LATER` or :py:const:`waflib.Task.RUN_ME`. """ ret = super(inst, self).runnable_status() if ret == Task.SKIP_ME and self.generator.bld.is_install: return Task.RUN_ME return ret def post_run(self): """ Disables any post-run operations """ pass def get_install_path(self, destdir=True): """ Returns the destination path where files will be installed, pre-pending `destdir`. Relative paths will be interpreted relative to `PREFIX` if no `destdir` is given. :rtype: string """ if isinstance(self.install_to, Node.Node): dest = self.install_to.abspath() else: dest = os.path.normpath(Utils.subst_vars(self.install_to, self.env)) if not os.path.isabs(dest): dest = os.path.join(self.env.PREFIX, dest) if destdir and Options.options.destdir: dest = os.path.join(Options.options.destdir, os.path.splitdrive(dest)[1].lstrip(os.sep)) return dest def copy_fun(self, src, tgt): """ Copies a file from src to tgt, preserving permissions and trying to work around path limitations on Windows platforms. On Unix-like platforms, the owner/group of the target file may be set through install_user/install_group :param src: absolute path :type src: string :param tgt: absolute path :type tgt: string """ # override this if you want to strip executables # kw['tsk'].source is the task that created the files in the build if Utils.is_win32 and len(tgt) > 259 and not tgt.startswith('\\\\?\\'): tgt = '\\\\?\\' + tgt shutil.copy2(src, tgt) self.fix_perms(tgt) def rm_empty_dirs(self, tgt): """ Removes empty folders recursively when uninstalling. :param tgt: absolute path :type tgt: string """ while tgt: tgt = os.path.dirname(tgt) try: os.rmdir(tgt) except OSError: break def run(self): """ Performs file or symlink installation """ is_install = self.generator.bld.is_install if not is_install: # unnecessary? return for x in self.outputs: if is_install == INSTALL: x.parent.mkdir() if self.type == 'symlink_as': fun = is_install == INSTALL and self.do_link or self.do_unlink fun(self.link, self.outputs[0].abspath()) else: fun = is_install == INSTALL and self.do_install or self.do_uninstall launch_node = self.generator.bld.launch_node() for x, y in zip(self.inputs, self.outputs): fun(x.abspath(), y.abspath(), x.path_from(launch_node)) def run_now(self): """ Try executing the installation task right now :raises: :py:class:`waflib.Errors.TaskNotReady` """ status = self.runnable_status() if status not in (Task.RUN_ME, Task.SKIP_ME): raise Errors.TaskNotReady('Could not process %r: status %r' % (self, status)) self.run() self.hasrun = Task.SUCCESS def do_install(self, src, tgt, lbl, **kw): """ Copies a file from src to tgt with given file permissions. The actual copy is only performed if the source and target file sizes or timestamps differ. When the copy occurs, the file is always first removed and then copied so as to prevent stale inodes. 
:param src: file name as absolute path :type src: string :param tgt: file destination, as absolute path :type tgt: string :param lbl: file source description :type lbl: string :param chmod: installation mode :type chmod: int :raises: :py:class:`waflib.Errors.WafError` if the file cannot be written """ if not Options.options.force: # check if the file is already there to avoid a copy try: st1 = os.stat(tgt) st2 = os.stat(src) except OSError: pass else: # same size and identical timestamps -> make no copy if st1.st_mtime + 2 >= st2.st_mtime and st1.st_size == st2.st_size: if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- install %s%s%s (from %s)', c1, c2, tgt, c1, lbl) return False if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s+ install %s%s%s (from %s)', c1, c2, tgt, c1, lbl) # Give best attempt at making destination overwritable, # like the 'install' utility used by 'make install' does. try: os.chmod(tgt, Utils.O644 | stat.S_IMODE(os.stat(tgt).st_mode)) except EnvironmentError: pass # following is for shared libs and stale inodes (-_-) try: os.remove(tgt) except OSError: pass try: self.copy_fun(src, tgt) except EnvironmentError as e: if not os.path.exists(src): Logs.error('File %r does not exist', src) elif not os.path.isfile(src): Logs.error('Input %r is not a file', src) raise Errors.WafError('Could not install the file %r' % tgt, e) def fix_perms(self, tgt): """ Change the ownership of the file/folder/link pointed by the given path This looks up for `install_user` or `install_group` attributes on the task or on the task generator:: def build(bld): bld.install_as('${PREFIX}/wscript', 'wscript', install_user='nobody', install_group='nogroup') bld.symlink_as('${PREFIX}/wscript_link', Utils.subst_vars('${PREFIX}/wscript', bld.env), install_user='nobody', install_group='nogroup') """ if not Utils.is_win32: user = getattr(self, 'install_user', None) or getattr(self.generator, 'install_user', None) group = getattr(self, 'install_group', None) or getattr(self.generator, 'install_group', None) if user or group: Utils.lchown(tgt, user or -1, group or -1) if not os.path.islink(tgt): os.chmod(tgt, self.chmod) def do_link(self, src, tgt, **kw): """ Creates a symlink from tgt to src. 
:param src: file name as absolute path :type src: string :param tgt: file destination, as absolute path :type tgt: string """ if os.path.islink(tgt) and os.readlink(tgt) == src: if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- symlink %s%s%s (to %s)', c1, c2, tgt, c1, src) else: try: os.remove(tgt) except OSError: pass if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s+ symlink %s%s%s (to %s)', c1, c2, tgt, c1, src) os.symlink(src, tgt) self.fix_perms(tgt) def do_uninstall(self, src, tgt, lbl, **kw): """ See :py:meth:`waflib.Build.inst.do_install` """ if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- remove %s%s%s', c1, c2, tgt, c1) #self.uninstall.append(tgt) try: os.remove(tgt) except OSError as e: if e.errno != errno.ENOENT: if not getattr(self, 'uninstall_error', None): self.uninstall_error = True Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)') if Logs.verbose > 1: Logs.warn('Could not remove %s (error code %r)', e.filename, e.errno) self.rm_empty_dirs(tgt) def do_unlink(self, src, tgt, **kw): """ See :py:meth:`waflib.Build.inst.do_link` """ try: if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- remove %s%s%s', c1, c2, tgt, c1) os.remove(tgt) except OSError: pass self.rm_empty_dirs(tgt) class InstallContext(BuildContext): '''installs the targets on the system''' cmd = 'install' def __init__(self, **kw): super(InstallContext, self).__init__(**kw) self.is_install = INSTALL class UninstallContext(InstallContext): '''removes the targets installed''' cmd = 'uninstall' def __init__(self, **kw): super(UninstallContext, self).__init__(**kw) self.is_install = UNINSTALL class CleanContext(BuildContext): '''cleans the project''' cmd = 'clean' def execute(self): """ See :py:func:`waflib.Build.BuildContext.execute`. """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) try: self.clean() finally: self.store() def clean(self): """ Remove most files from the build directory, and reset all caches. Custom lists of files to clean can be declared as `bld.clean_files`. For example, exclude `build/program/myprogram` from getting removed:: def build(bld): bld.clean_files = bld.bldnode.ant_glob('**', excl='.lock* config.log c4che/* config.h program/myprogram', quiet=True, generator=True) """ Logs.debug('build: clean called') if hasattr(self, 'clean_files'): for n in self.clean_files: n.delete() elif self.bldnode != self.srcnode: # would lead to a disaster if top == out lst = [] for env in self.all_envs.values(): lst.extend(self.root.find_or_declare(f) for f in env[CFG_FILES]) excluded_dirs = '.lock* *conf_check_*/** config.log %s/*' % CACHE_DIR for n in self.bldnode.ant_glob('**/*', excl=excluded_dirs, quiet=True): if n in lst: continue n.delete() self.root.children = {} for v in SAVED_ATTRS: if v == 'root': continue setattr(self, v, {}) class ListContext(BuildContext): '''lists the targets to execute''' cmd = 'list' def execute(self): """ In addition to printing the name of each build target, a description column will include text for each task generator which has a "description" field set. See :py:func:`waflib.Build.BuildContext.execute`. 
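		Typical invocation::

			$ waf list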
""" self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) self.pre_build() # display the time elapsed in the progress bar self.timer = Utils.Timer() for g in self.groups: for tg in g: try: f = tg.post except AttributeError: pass else: f() try: # force the cache initialization self.get_tgen_by_name('') except Errors.WafError: pass targets = sorted(self.task_gen_cache_names) # figure out how much to left-justify, for largest target name line_just = max(len(t) for t in targets) if targets else 0 for target in targets: tgen = self.task_gen_cache_names[target] # Support displaying the description for the target # if it was set on the tgen descript = getattr(tgen, 'description', '') if descript: target = target.ljust(line_just) descript = ': %s' % descript Logs.pprint('GREEN', target, label=descript) class StepContext(BuildContext): '''executes tasks in a step-by-step fashion, for debugging''' cmd = 'step' def __init__(self, **kw): super(StepContext, self).__init__(**kw) self.files = Options.options.files def compile(self): """ Overrides :py:meth:`waflib.Build.BuildContext.compile` to perform a partial build on tasks matching the input/output pattern given (regular expression matching):: $ waf step --files=foo.c,bar.c,in:truc.c,out:bar.o $ waf step --files=in:foo.cpp.1.o # link task only """ if not self.files: Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"') BuildContext.compile(self) return targets = [] if self.targets and self.targets != '*': targets = self.targets.split(',') for g in self.groups: for tg in g: if targets and tg.name not in targets: continue try: f = tg.post except AttributeError: pass else: f() for pat in self.files.split(','): matcher = self.get_matcher(pat) for tg in g: if isinstance(tg, Task.Task): lst = [tg] else: lst = tg.tasks for tsk in lst: do_exec = False for node in tsk.inputs: if matcher(node, output=False): do_exec = True break for node in tsk.outputs: if matcher(node, output=True): do_exec = True break if do_exec: ret = tsk.run() Logs.info('%s -> exit %r', tsk, ret) def get_matcher(self, pat): """ Converts a step pattern into a function :param: pat: pattern of the form in:truc.c,out:bar.o :returns: Python function that uses Node objects as inputs and returns matches :rtype: function """ # this returns a function inn = True out = True if pat.startswith('in:'): out = False pat = pat.replace('in:', '') elif pat.startswith('out:'): inn = False pat = pat.replace('out:', '') anode = self.root.find_node(pat) pattern = None if not anode: if not pat.startswith('^'): pat = '^.+?%s' % pat if not pat.endswith('$'): pat = '%s$' % pat pattern = re.compile(pat) def match(node, output): if output and not out: return False if not output and not inn: return False if anode: return anode == node else: return pattern.match(node.abspath()) return match class EnvContext(BuildContext): """Subclass EnvContext to create commands that require configuration data in 'env'""" fun = cmd = None def execute(self): """ See :py:func:`waflib.Build.BuildContext.execute`. 
""" self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/ConfigSet.py0000660000000000000000000002014600000000000021643 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ ConfigSet: a special dict The values put in :py:class:`ConfigSet` must be serializable (dicts, lists, strings) """ import copy, re, os from waflib import Logs, Utils re_imp = re.compile(r'^(#)*?([^#=]*?)\ =\ (.*?)$', re.M) class ConfigSet(object): """ A copy-on-write dict with human-readable serialized format. The serialization format is human-readable (python-like) and performed by using eval() and repr(). For high performance prefer pickle. Do not store functions as they are not serializable. The values can be accessed by attributes or by keys:: from waflib.ConfigSet import ConfigSet env = ConfigSet() env.FOO = 'test' env['FOO'] = 'test' """ __slots__ = ('table', 'parent') def __init__(self, filename=None): self.table = {} """ Internal dict holding the object values """ #self.parent = None if filename: self.load(filename) def __contains__(self, key): """ Enables the *in* syntax:: if 'foo' in env: print(env['foo']) """ if key in self.table: return True try: return self.parent.__contains__(key) except AttributeError: return False # parent may not exist def keys(self): """Dict interface""" keys = set() cur = self while cur: keys.update(cur.table.keys()) cur = getattr(cur, 'parent', None) keys = list(keys) keys.sort() return keys def __iter__(self): return iter(self.keys()) def __str__(self): """Text representation of the ConfigSet (for debugging purposes)""" return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()]) def __getitem__(self, key): """ Dictionary interface: get value from key:: def configure(conf): conf.env['foo'] = {} print(env['foo']) """ try: while 1: x = self.table.get(key) if not x is None: return x self = self.parent except AttributeError: return [] def __setitem__(self, key, value): """ Dictionary interface: set value from key """ self.table[key] = value def __delitem__(self, key): """ Dictionary interface: mark the value as missing """ self[key] = [] def __getattr__(self, name): """ Attribute access provided for convenience. The following forms are equivalent:: def configure(conf): conf.env.value conf.env['value'] """ if name in self.__slots__: return object.__getattribute__(self, name) else: return self[name] def __setattr__(self, name, value): """ Attribute access provided for convenience. The following forms are equivalent:: def configure(conf): conf.env.value = x env['value'] = x """ if name in self.__slots__: object.__setattr__(self, name, value) else: self[name] = value def __delattr__(self, name): """ Attribute access provided for convenience. The following forms are equivalent:: def configure(conf): del env.value del env['value'] """ if name in self.__slots__: object.__delattr__(self, name) else: del self[name] def derive(self): """ Returns a new ConfigSet deriving from self. The copy returned will be a shallow copy:: from waflib.ConfigSet import ConfigSet env = ConfigSet() env.append_value('CFLAGS', ['-O2']) child = env.derive() child.CFLAGS.append('test') # warning! this will modify 'env' child.CFLAGS = ['-O3'] # new list, ok child.append_value('CFLAGS', ['-O3']) # ok Use :py:func:`ConfigSet.detach` to detach the child from the parent. 
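		A short follow-up sketch::

			child = env.derive()
			child.detach()
			child.CFLAGS.append('-g') # after detach(), 'env' is not affected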
""" newenv = ConfigSet() newenv.parent = self return newenv def detach(self): """ Detaches this instance from its parent (if present) Modifying the parent :py:class:`ConfigSet` will not change the current object Modifying this :py:class:`ConfigSet` will not modify the parent one. """ tbl = self.get_merged_dict() try: delattr(self, 'parent') except AttributeError: pass else: keys = tbl.keys() for x in keys: tbl[x] = copy.deepcopy(tbl[x]) self.table = tbl return self def get_flat(self, key): """ Returns a value as a string. If the input is a list, the value returned is space-separated. :param key: key to use :type key: string """ s = self[key] if isinstance(s, str): return s return ' '.join(s) def _get_list_value_for_modification(self, key): """ Returns a list value for further modification. The list may be modified inplace and there is no need to do this afterwards:: self.table[var] = value """ try: value = self.table[key] except KeyError: try: value = self.parent[key] except AttributeError: value = [] else: if isinstance(value, list): # force a copy value = value[:] else: value = [value] self.table[key] = value else: if not isinstance(value, list): self.table[key] = value = [value] return value def append_value(self, var, val): """ Appends a value to the specified config key:: def build(bld): bld.env.append_value('CFLAGS', ['-O2']) The value must be a list or a tuple """ if isinstance(val, str): # if there were string everywhere we could optimize this val = [val] current_value = self._get_list_value_for_modification(var) current_value.extend(val) def prepend_value(self, var, val): """ Prepends a value to the specified item:: def configure(conf): conf.env.prepend_value('CFLAGS', ['-O2']) The value must be a list or a tuple """ if isinstance(val, str): val = [val] self.table[var] = val + self._get_list_value_for_modification(var) def append_unique(self, var, val): """ Appends a value to the specified item only if it's not already present:: def build(bld): bld.env.append_unique('CFLAGS', ['-O2', '-g']) The value must be a list or a tuple """ if isinstance(val, str): val = [val] current_value = self._get_list_value_for_modification(var) for x in val: if x not in current_value: current_value.append(x) def get_merged_dict(self): """ Computes the merged dictionary from the fusion of self and all its parent :rtype: a ConfigSet object """ table_list = [] env = self while 1: table_list.insert(0, env.table) try: env = env.parent except AttributeError: break merged_table = {} for table in table_list: merged_table.update(table) return merged_table def store(self, filename): """ Serializes the :py:class:`ConfigSet` data to a file. See :py:meth:`ConfigSet.load` for reading such files. :param filename: file to use :type filename: string """ try: os.makedirs(os.path.split(filename)[0]) except OSError: pass buf = [] merged_table = self.get_merged_dict() keys = list(merged_table.keys()) keys.sort() try: fun = ascii except NameError: fun = repr for k in keys: if k != 'undo_stack': buf.append('%s = %s\n' % (k, fun(merged_table[k]))) Utils.writef(filename, ''.join(buf)) def load(self, filename): """ Restores contents from a file (current values are not cleared). Files are written using :py:meth:`ConfigSet.store`. 
:param filename: file to use :type filename: string """ tbl = self.table code = Utils.readf(filename, m='r') for m in re_imp.finditer(code): g = m.group tbl[g(2)] = eval(g(3)) Logs.debug('env: %s', self.table) def update(self, d): """ Dictionary interface: replace values with the ones from another dict :param d: object to use the value from :type d: dict-like object """ self.table.update(d) def stash(self): """ Stores the object state to provide transactionality semantics:: env = ConfigSet() env.stash() try: env.append_value('CFLAGS', '-O3') call_some_method(env) finally: env.revert() The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store` """ orig = self.table tbl = self.table = self.table.copy() for x in tbl.keys(): tbl[x] = copy.deepcopy(tbl[x]) self.undo_stack = self.undo_stack + [orig] def commit(self): """ Commits transactional changes. See :py:meth:`ConfigSet.stash` """ self.undo_stack.pop(-1) def revert(self): """ Reverts the object to a previous state. See :py:meth:`ConfigSet.stash` """ self.table = self.undo_stack.pop(-1) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Configure.py0000660000000000000000000004537700000000000021720 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Configuration system A :py:class:`waflib.Configure.ConfigurationContext` instance is created when ``waf configure`` is called, it is used to: * create data dictionaries (ConfigSet instances) * store the list of modules to import * hold configuration routines such as ``find_program``, etc """ import os, re, shlex, shutil, sys, time, traceback from waflib import ConfigSet, Utils, Options, Logs, Context, Build, Errors WAF_CONFIG_LOG = 'config.log' """Name of the configuration log file""" autoconfig = False """Execute the configuration automatically""" conf_template = '''# project %(app)s configured on %(now)s by # waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s) # using %(args)s #''' class ConfigurationContext(Context.Context): '''configures the project''' cmd = 'configure' error_handlers = [] """ Additional functions to handle configuration errors """ def __init__(self, **kw): super(ConfigurationContext, self).__init__(**kw) self.environ = dict(os.environ) self.all_envs = {} self.top_dir = None self.out_dir = None self.tools = [] # tools loaded in the configuration, and that will be loaded when building self.hash = 0 self.files = [] self.tool_cache = [] self.setenv('') def setenv(self, name, env=None): """ Set a new config set for conf.env. If a config set of that name already exists, recall it without modification. The name is the filename prefix to save to ``c4che/NAME_cache.py``, and it is also used as *variants* by the build commands. 
Though related to variants, whatever kind of data may be stored in the config set:: def configure(cfg): cfg.env.ONE = 1 cfg.setenv('foo') cfg.env.ONE = 2 def build(bld): 2 == bld.env_of_name('foo').ONE :param name: name of the configuration set :type name: string :param env: ConfigSet to copy, or an empty ConfigSet is created :type env: :py:class:`waflib.ConfigSet.ConfigSet` """ if name not in self.all_envs or env: if not env: env = ConfigSet.ConfigSet() self.prepare_env(env) else: env = env.derive() self.all_envs[name] = env self.variant = name def get_env(self): """Getter for the env property""" return self.all_envs[self.variant] def set_env(self, val): """Setter for the env property""" self.all_envs[self.variant] = val env = property(get_env, set_env) def init_dirs(self): """ Initialize the project directory and the build directory """ top = self.top_dir if not top: top = Options.options.top if not top: top = getattr(Context.g_module, Context.TOP, None) if not top: top = self.path.abspath() top = os.path.abspath(top) self.srcnode = (os.path.isabs(top) and self.root or self.path).find_dir(top) assert(self.srcnode) out = self.out_dir if not out: out = Options.options.out if not out: out = getattr(Context.g_module, Context.OUT, None) if not out: out = Options.lockfile.replace('.lock-waf_%s_' % sys.platform, '').replace('.lock-waf', '') # someone can be messing with symlinks out = os.path.realpath(out) self.bldnode = (os.path.isabs(out) and self.root or self.path).make_node(out) self.bldnode.mkdir() if not os.path.isdir(self.bldnode.abspath()): self.fatal('Could not create the build directory %s' % self.bldnode.abspath()) def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ self.init_dirs() self.cachedir = self.bldnode.make_node(Build.CACHE_DIR) self.cachedir.mkdir() path = os.path.join(self.bldnode.abspath(), WAF_CONFIG_LOG) self.logger = Logs.make_logger(path, 'cfg') app = getattr(Context.g_module, 'APPNAME', '') if app: ver = getattr(Context.g_module, 'VERSION', '') if ver: app = "%s (%s)" % (app, ver) params = {'now': time.ctime(), 'pyver': sys.hexversion, 'systype': sys.platform, 'args': " ".join(sys.argv), 'wafver': Context.WAFVERSION, 'abi': Context.ABI, 'app': app} self.to_log(conf_template % params) self.msg('Setting top to', self.srcnode.abspath()) self.msg('Setting out to', self.bldnode.abspath()) if id(self.srcnode) == id(self.bldnode): Logs.warn('Setting top == out') elif id(self.path) != id(self.srcnode): if self.srcnode.is_child_of(self.path): Logs.warn('Are you certain that you do not want to set top="." ?') super(ConfigurationContext, self).execute() self.store() Context.top_dir = self.srcnode.abspath() Context.out_dir = self.bldnode.abspath() # this will write a configure lock so that subsequent builds will # consider the current path as the root directory (see prepare_impl). 
# to remove: use 'waf distclean' env = ConfigSet.ConfigSet() env.argv = sys.argv env.options = Options.options.__dict__ env.config_cmd = self.cmd env.run_dir = Context.run_dir env.top_dir = Context.top_dir env.out_dir = Context.out_dir # conf.hash & conf.files hold wscript files paths and hash # (used only by Configure.autoconfig) env.hash = self.hash env.files = self.files env.environ = dict(self.environ) env.launch_dir = Context.launch_dir if not (self.env.NO_LOCK_IN_RUN or env.environ.get('NO_LOCK_IN_RUN') or getattr(Options.options, 'no_lock_in_run')): env.store(os.path.join(Context.run_dir, Options.lockfile)) if not (self.env.NO_LOCK_IN_TOP or env.environ.get('NO_LOCK_IN_TOP') or getattr(Options.options, 'no_lock_in_top')): env.store(os.path.join(Context.top_dir, Options.lockfile)) if not (self.env.NO_LOCK_IN_OUT or env.environ.get('NO_LOCK_IN_OUT') or getattr(Options.options, 'no_lock_in_out')): env.store(os.path.join(Context.out_dir, Options.lockfile)) def prepare_env(self, env): """ Insert *PREFIX*, *BINDIR* and *LIBDIR* values into ``env`` :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param env: a ConfigSet, usually ``conf.env`` """ if not env.PREFIX: if Options.options.prefix or Utils.is_win32: env.PREFIX = Options.options.prefix else: env.PREFIX = '/' if not env.BINDIR: if Options.options.bindir: env.BINDIR = Options.options.bindir else: env.BINDIR = Utils.subst_vars('${PREFIX}/bin', env) if not env.LIBDIR: if Options.options.libdir: env.LIBDIR = Options.options.libdir else: env.LIBDIR = Utils.subst_vars('${PREFIX}/lib%s' % Utils.lib64(), env) def store(self): """Save the config results into the cache file""" n = self.cachedir.make_node('build.config.py') n.write('version = 0x%x\ntools = %r\n' % (Context.HEXVERSION, self.tools)) if not self.all_envs: self.fatal('nothing to store in the configuration context!') for key in self.all_envs: tmpenv = self.all_envs[key] tmpenv.store(os.path.join(self.cachedir.abspath(), key + Build.CACHE_SUFFIX)) def load(self, tool_list, tooldir=None, funs=None, with_sys_path=True, cache=False): """ Load Waf tools, which will be imported whenever a build is started. 
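A minimal usage sketch from a project wscript (``compiler_c`` is a standard waf tool; ``my_tool`` and the ``waf_tools`` directory are illustrative)::

	def configure(conf):
		# tool shipped with waf
		conf.load('compiler_c')
		# project-specific tool looked up in a custom directory (hypothetical names)
		conf.load('my_tool', tooldir='waf_tools')
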
:param tool_list: waf tools to import :type tool_list: list of string :param tooldir: paths for the imports :type tooldir: list of string :param funs: functions to execute from the waf tools :type funs: list of string :param cache: whether to prevent the tool from running twice :type cache: bool """ tools = Utils.to_list(tool_list) if tooldir: tooldir = Utils.to_list(tooldir) for tool in tools: # avoid loading the same tool more than once with the same functions # used by composite projects if cache: mag = (tool, id(self.env), tooldir, funs) if mag in self.tool_cache: self.to_log('(tool %s is already loaded, skipping)' % tool) continue self.tool_cache.append(mag) module = None try: module = Context.load_tool(tool, tooldir, ctx=self, with_sys_path=with_sys_path) except ImportError as e: self.fatal('Could not load the Waf tool %r from %r\n%s' % (tool, getattr(e, 'waf_sys_path', sys.path), e)) except Exception as e: self.to_log('imp %r (%r & %r)' % (tool, tooldir, funs)) self.to_log(traceback.format_exc()) raise if funs is not None: self.eval_rules(funs) else: func = getattr(module, 'configure', None) if func: if type(func) is type(Utils.readf): func(self) else: self.eval_rules(func) self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs}) def post_recurse(self, node): """ Records the path and a hash of the scripts visited, see :py:meth:`waflib.Context.Context.post_recurse` :param node: script :type node: :py:class:`waflib.Node.Node` """ super(ConfigurationContext, self).post_recurse(node) self.hash = Utils.h_list((self.hash, node.read('rb'))) self.files.append(node.abspath()) def eval_rules(self, rules): """ Execute configuration tests provided as list of functions to run :param rules: list of configuration method names :type rules: list of string """ self.rules = Utils.to_list(rules) for x in self.rules: f = getattr(self, x) if not f: self.fatal('No such configuration function %r' % x) f() def conf(f): """ Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter named 'mandatory' to disable the configuration errors:: def configure(conf): conf.find_program('abc', mandatory=False) :param f: method to bind :type f: function """ def fun(*k, **kw): mandatory = kw.pop('mandatory', True) try: return f(*k, **kw) except Errors.ConfigurationError: if mandatory: raise fun.__name__ = f.__name__ setattr(ConfigurationContext, f.__name__, fun) setattr(Build.BuildContext, f.__name__, fun) return f @conf def add_os_flags(self, var, dest=None, dup=False): """ Import operating system environment values into ``conf.env`` dict:: def configure(conf): conf.add_os_flags('CFLAGS') :param var: variable to use :type var: string :param dest: destination variable, by default the same as var :type dest: string :param dup: add the same set of flags again :type dup: bool """ try: flags = shlex.split(self.environ[var]) except KeyError: return if dup or ''.join(flags) not in ''.join(Utils.to_list(self.env[dest or var])): self.env.append_value(dest or var, flags) @conf def cmd_to_list(self, cmd): """ Detect if a command is written in pseudo shell like ``ccache g++`` and return a list. :param cmd: command :type cmd: a string or a list of string """ if isinstance(cmd, str): if os.path.isfile(cmd): # do not take any risk return [cmd] if os.sep == '/': return shlex.split(cmd) else: try: return shlex.split(cmd, posix=False) except TypeError: # Python 2.5 on windows? 
return shlex.split(cmd) return cmd @conf def check_waf_version(self, mini='1.9.99', maxi='2.1.0', **kw): """ Raise a Configuration error if the Waf version does not strictly match the given bounds:: conf.check_waf_version(mini='1.9.99', maxi='2.1.0') :type mini: number, tuple or string :param mini: Minimum required version :type maxi: number, tuple or string :param maxi: Maximum allowed version """ self.start_msg('Checking for waf version in %s-%s' % (str(mini), str(maxi)), **kw) ver = Context.HEXVERSION if Utils.num2ver(mini) > ver: self.fatal('waf version should be at least %r (%r found)' % (Utils.num2ver(mini), ver)) if Utils.num2ver(maxi) < ver: self.fatal('waf version should be at most %r (%r found)' % (Utils.num2ver(maxi), ver)) self.end_msg('ok', **kw) @conf def find_file(self, filename, path_list=[]): """ Find a file in a list of paths :param filename: name of the file to search for :param path_list: list of directories to search :return: the first matching filename; else a configuration exception is raised """ for n in Utils.to_list(filename): for d in Utils.to_list(path_list): p = os.path.expanduser(os.path.join(d, n)) if os.path.exists(p): return p self.fatal('Could not find %r' % filename) @conf def find_program(self, filename, **kw): """ Search for a program on the operating system When var is used, you may set os.environ[var] to help find a specific program version, for example:: $ CC='ccache gcc' waf configure :param path_list: paths to use for searching :type param_list: list of string :param var: store the result to conf.env[var] where var defaults to filename.upper() if not provided; the result is stored as a list of strings :type var: string :param value: obtain the program from the value passed exclusively :type value: list or string (list is preferred) :param exts: list of extensions for the binary (do not add an extension for portability) :type exts: list of string :param msg: name to display in the log, by default filename is used :type msg: string :param interpreter: interpreter for the program :type interpreter: ConfigSet variable key :raises: :py:class:`waflib.Errors.ConfigurationError` """ exts = kw.get('exts', Utils.is_win32 and '.exe,.com,.bat,.cmd' or ',.sh,.pl,.py') environ = kw.get('environ', getattr(self, 'environ', os.environ)) ret = '' filename = Utils.to_list(filename) msg = kw.get('msg', ', '.join(filename)) var = kw.get('var', '') if not var: var = re.sub(r'[-.]', '_', filename[0].upper()) path_list = kw.get('path_list', '') if path_list: path_list = Utils.to_list(path_list) else: path_list = environ.get('PATH', '').split(os.pathsep) if kw.get('value'): # user-provided in command-line options and passed to find_program ret = self.cmd_to_list(kw['value']) elif environ.get(var): # user-provided in the os environment ret = self.cmd_to_list(environ[var]) elif self.env[var]: # a default option in the wscript file ret = self.cmd_to_list(self.env[var]) else: if not ret: ret = self.find_binary(filename, exts.split(','), path_list) if not ret and Utils.winreg: ret = Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER, filename) if not ret and Utils.winreg: ret = Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE, filename) ret = self.cmd_to_list(ret) if ret: if len(ret) == 1: retmsg = ret[0] else: retmsg = ret else: retmsg = False self.msg('Checking for program %r' % msg, retmsg, **kw) if not kw.get('quiet'): self.to_log('find program=%r paths=%r var=%r -> %r' % (filename, path_list, var, ret)) if not ret: self.fatal(kw.get('errmsg', '') or 
'Could not find the program %r' % filename) interpreter = kw.get('interpreter') if interpreter is None: if not Utils.check_exe(ret[0], env=environ): self.fatal('Program %r is not executable' % ret) self.env[var] = ret else: self.env[var] = self.env[interpreter] + ret return ret @conf def find_binary(self, filenames, exts, paths): for f in filenames: for ext in exts: exe_name = f + ext if os.path.isabs(exe_name): if os.path.isfile(exe_name): return exe_name else: for path in paths: x = os.path.expanduser(os.path.join(path, exe_name)) if os.path.isfile(x): return x return None @conf def run_build(self, *k, **kw): """ Create a temporary build context to execute a build. A temporary reference to that build context is kept on self.test_bld for debugging purposes. The arguments to this function are passed to a single task generator for that build. Only three parameters are mandatory: :param features: features to pass to a task generator created in the build :type features: list of string :param compile_filename: file to create for the compilation (default: *test.c*) :type compile_filename: string :param code: input file contents :type code: string Though this function returns *0* by default, the build may bind attribute named *retval* on the build context object to return a particular value. See :py:func:`waflib.Tools.c_config.test_exec_fun` for example. The temporary builds creates a temporary folder; the name of that folder is calculated by hashing input arguments to this function, with the exception of :py:class:`waflib.ConfigSet.ConfigSet` objects which are used for both reading and writing values. This function also features a cache which is disabled by default; that cache relies on the hash value calculated as indicated above:: def options(opt): opt.add_option('--confcache', dest='confcache', default=0, action='count', help='Use a configuration cache') And execute the configuration with the following command-line:: $ waf configure --confcache """ buf = [] for key in sorted(kw.keys()): v = kw[key] if isinstance(v, ConfigSet.ConfigSet): # values are being written to, so they are excluded from contributing to the hash continue elif hasattr(v, '__call__'): buf.append(Utils.h_fun(v)) else: buf.append(str(v)) h = Utils.h_list(buf) dir = self.bldnode.abspath() + os.sep + (not Utils.is_win32 and '.' 
or '') + 'conf_check_' + Utils.to_hex(h) cachemode = kw.get('confcache', getattr(Options.options, 'confcache', None)) if not cachemode and os.path.exists(dir): shutil.rmtree(dir) try: os.makedirs(dir) except OSError: pass try: os.stat(dir) except OSError: self.fatal('cannot use the configuration test folder %r' % dir) if cachemode == 1: try: proj = ConfigSet.ConfigSet(os.path.join(dir, 'cache_run_build')) except EnvironmentError: pass else: ret = proj['cache_run_build'] if isinstance(ret, str) and ret.startswith('Test does not build'): self.fatal(ret) return ret bdir = os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) cls_name = kw.get('run_build_cls') or getattr(self, 'run_build_cls', 'build') self.test_bld = bld = Context.create_context(cls_name, top_dir=dir, out_dir=bdir) bld.init_dirs() bld.progress_bar = 0 bld.targets = '*' bld.logger = self.logger bld.all_envs.update(self.all_envs) # not really necessary bld.env = kw['env'] bld.kw = kw bld.conf = self kw['build_fun'](bld) ret = -1 try: try: bld.compile() except Errors.WafError: ret = 'Test does not build: %s' % traceback.format_exc() self.fatal(ret) else: ret = getattr(bld, 'retval', 0) finally: if cachemode: # cache the results each time proj = ConfigSet.ConfigSet() proj['cache_run_build'] = ret proj.store(os.path.join(dir, 'cache_run_build')) else: shutil.rmtree(dir) return ret @conf def ret_msg(self, msg, args): if isinstance(msg, str): return msg return msg(args) @conf def test(self, *k, **kw): if not 'env' in kw: kw['env'] = self.env.derive() # validate_c for example if kw.get('validate'): kw['validate'](kw) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret if kw.get('post_check'): ret = kw['post_check'](kw) if ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/Context.py0000660000000000000000000005133400000000000021411 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2018 (ita) """ Classes and functions enabling the command system """ import os, re, sys from waflib import Utils, Errors, Logs import waflib.Node if sys.hexversion > 0x3040000: import types class imp(object): new_module = lambda x: types.ModuleType(x) else: import imp # the following 3 constants are updated on each new release (do not touch) HEXVERSION=0x2001500 """Constant updated on new releases""" WAFVERSION="2.0.21" """Constant updated on new releases""" WAFREVISION="edde20a6425a5c3eb6b47d5f3f5c4fbc93fed5f4" """Git revision when the waf version is updated""" WAFNAME="waf" """Application name displayed on --help""" ABI = 20 """Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)""" DBFILE = '.wafpickle-%s-%d-%d' % (sys.platform, sys.hexversion, ABI) """Name of the pickle file for storing the build data""" APPNAME = 'APPNAME' """Default application name (used by ``waf dist``)""" VERSION = 'VERSION' """Default application version (used by ``waf dist``)""" TOP = 'top' """The variable name for the top-level directory in wscript files""" OUT = 'out' """The variable name for the output directory in 
wscript files""" WSCRIPT_FILE = 'wscript' """Name of the waf script files""" launch_dir = '' """Directory from which waf has been called""" run_dir = '' """Location of the wscript file to use as the entry point""" top_dir = '' """Location of the project directory (top), if the project was configured""" out_dir = '' """Location of the build directory (out), if the project was configured""" waf_dir = '' """Directory containing the waf modules""" default_encoding = Utils.console_encoding() """Encoding to use when reading outputs from other processes""" g_module = None """ Module representing the top-level wscript file (see :py:const:`waflib.Context.run_dir`) """ STDOUT = 1 STDERR = -1 BOTH = 0 classes = [] """ List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes are added automatically by a metaclass. """ def create_context(cmd_name, *k, **kw): """ Returns a new :py:class:`waflib.Context.Context` instance corresponding to the given command. Used in particular by :py:func:`waflib.Scripting.run_command` :param cmd_name: command name :type cmd_name: string :param k: arguments to give to the context class initializer :type k: list :param k: keyword arguments to give to the context class initializer :type k: dict :return: Context object :rtype: :py:class:`waflib.Context.Context` """ for x in classes: if x.cmd == cmd_name: return x(*k, **kw) ctx = Context(*k, **kw) ctx.fun = cmd_name return ctx class store_context(type): """ Metaclass that registers command classes into the list :py:const:`waflib.Context.classes` Context classes must provide an attribute 'cmd' representing the command name, and a function attribute 'fun' representing the function name that the command uses. """ def __init__(cls, name, bases, dct): super(store_context, cls).__init__(name, bases, dct) name = cls.__name__ if name in ('ctx', 'Context'): return try: cls.cmd except AttributeError: raise Errors.WafError('Missing command for the context class %r (cmd)' % name) if not getattr(cls, 'fun', None): cls.fun = cls.cmd classes.insert(0, cls) ctx = store_context('ctx', (object,), {}) """Base class for all :py:class:`waflib.Context.Context` classes""" class Context(ctx): """ Default context for waf commands, and base class for new command contexts. Context objects are passed to top-level functions:: def foo(ctx): print(ctx.__class__.__name__) # waflib.Context.Context Subclasses must define the class attributes 'cmd' and 'fun': :param cmd: command to execute as in ``waf cmd`` :type cmd: string :param fun: function name to execute when the command is called :type fun: string .. 
inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext """ errors = Errors """ Shortcut to :py:mod:`waflib.Errors` provided for convenience """ tools = {} """ A module cache for wscript files; see :py:meth:`Context.Context.load` """ def __init__(self, **kw): try: rd = kw['run_dir'] except KeyError: rd = run_dir # binds the context to the nodes in use to avoid a context singleton self.node_class = type('Nod3', (waflib.Node.Node,), {}) self.node_class.__module__ = 'waflib.Node' self.node_class.ctx = self self.root = self.node_class('', None) self.cur_script = None self.path = self.root.find_dir(rd) self.stack_path = [] self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self} self.logger = None def finalize(self): """ Called to free resources such as logger files """ try: logger = self.logger except AttributeError: pass else: Logs.free_logger(logger) delattr(self, 'logger') def load(self, tool_list, *k, **kw): """ Loads a Waf tool as a module, and try calling the function named :py:const:`waflib.Context.Context.fun` from it. A ``tooldir`` argument may be provided as a list of module paths. :param tool_list: list of Waf tool names to load :type tool_list: list of string or space-separated string """ tools = Utils.to_list(tool_list) path = Utils.to_list(kw.get('tooldir', '')) with_sys_path = kw.get('with_sys_path', True) for t in tools: module = load_tool(t, path, with_sys_path=with_sys_path) fun = getattr(module, kw.get('name', self.fun), None) if fun: fun(self) def execute(self): """ Here, it calls the function name in the top-level wscript file. Most subclasses redefine this method to provide additional functionality. """ self.recurse([os.path.dirname(g_module.root_path)]) def pre_recurse(self, node): """ Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`. The current script is bound as a Node object on ``self.cur_script``, and the current path is bound to ``self.path`` :param node: script :type node: :py:class:`waflib.Node.Node` """ self.stack_path.append(self.cur_script) self.cur_script = node self.path = node.parent def post_recurse(self, node): """ Restores ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates. :param node: script :type node: :py:class:`waflib.Node.Node` """ self.cur_script = self.stack_path.pop() if self.cur_script: self.path = self.cur_script.parent def recurse(self, dirs, name=None, mandatory=True, once=True, encoding=None): """ Runs user-provided functions from the supplied list of directories. The directories can be either absolute, or relative to the directory of the wscript file The methods :py:meth:`waflib.Context.Context.pre_recurse` and :py:meth:`waflib.Context.Context.post_recurse` are called immediately before and after a script has been executed. 
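For example, a top-level wscript may hand control to sub-folders like this (the folder names are illustrative)::

	def build(bld):
		# reads src/wscript and tests/wscript and runs their build() functions
		bld.recurse('src tests')
		# a missing wscript file is tolerated when mandatory=False
		bld.recurse(['contrib'], mandatory=False)
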
:param dirs: List of directories to visit :type dirs: list of string or space-separated string :param name: Name of function to invoke from the wscript :type name: string :param mandatory: whether sub wscript files are required to exist :type mandatory: bool :param once: read the script file once for a particular context :type once: bool """ try: cache = self.recurse_cache except AttributeError: cache = self.recurse_cache = {} for d in Utils.to_list(dirs): if not os.path.isabs(d): # absolute paths only d = os.path.join(self.path.abspath(), d) WSCRIPT = os.path.join(d, WSCRIPT_FILE) WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun) node = self.root.find_node(WSCRIPT_FUN) if node and (not once or node not in cache): cache[node] = True self.pre_recurse(node) try: function_code = node.read('r', encoding) exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict) finally: self.post_recurse(node) elif not node: node = self.root.find_node(WSCRIPT) tup = (node, name or self.fun) if node and (not once or tup not in cache): cache[tup] = True self.pre_recurse(node) try: wscript_module = load_module(node.abspath(), encoding=encoding) user_function = getattr(wscript_module, (name or self.fun), None) if not user_function: if not mandatory: continue raise Errors.WafError('No function %r defined in %s' % (name or self.fun, node.abspath())) user_function(self) finally: self.post_recurse(node) elif not node: if not mandatory: continue try: os.listdir(d) except OSError: raise Errors.WafError('Cannot read the folder %r' % d) raise Errors.WafError('No wscript file in directory %s' % d) def log_command(self, cmd, kw): if Logs.verbose: fmt = os.environ.get('WAF_CMD_FORMAT') if fmt == 'string': if not isinstance(cmd, str): cmd = Utils.shell_escape(cmd) Logs.debug('runner: %r', cmd) Logs.debug('runner_env: kw=%s', kw) def exec_command(self, cmd, **kw): """ Runs an external process and returns the exit status:: def run(tsk): ret = tsk.generator.bld.exec_command('touch foo.txt') return ret If the context has the attribute 'log', then captures and logs the process stderr/stdout. Unlike :py:meth:`waflib.Context.Context.cmd_and_log`, this method does not return the stdout/stderr values captured. :param cmd: command argument for subprocess.Popen :type cmd: string or list :param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate. :type kw: dict :returns: process exit status :rtype: integer :raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process :raises: :py:class:`waflib.Errors.WafError` in case of execution failure """ subprocess = Utils.subprocess kw['shell'] = isinstance(cmd, str) self.log_command(cmd, kw) if self.logger: self.logger.info(cmd) if 'stdout' not in kw: kw['stdout'] = subprocess.PIPE if 'stderr' not in kw: kw['stderr'] = subprocess.PIPE if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]): raise Errors.WafError('Program %s not found!' 
% cmd[0]) cargs = {} if 'timeout' in kw: if sys.hexversion >= 0x3030000: cargs['timeout'] = kw['timeout'] if not 'start_new_session' in kw: kw['start_new_session'] = True del kw['timeout'] if 'input' in kw: if kw['input']: cargs['input'] = kw['input'] kw['stdin'] = subprocess.PIPE del kw['input'] if 'cwd' in kw: if not isinstance(kw['cwd'], str): kw['cwd'] = kw['cwd'].abspath() encoding = kw.pop('decode_as', default_encoding) try: ret, out, err = Utils.run_process(cmd, kw, cargs) except Exception as e: raise Errors.WafError('Execution failure: %s' % str(e), ex=e) if out: if not isinstance(out, str): out = out.decode(encoding, errors='replace') if self.logger: self.logger.debug('out: %s', out) else: Logs.info(out, extra={'stream':sys.stdout, 'c1': ''}) if err: if not isinstance(err, str): err = err.decode(encoding, errors='replace') if self.logger: self.logger.error('err: %s' % err) else: Logs.info(err, extra={'stream':sys.stderr, 'c1': ''}) return ret def cmd_and_log(self, cmd, **kw): """ Executes a process and returns stdout/stderr if the execution is successful. An exception is thrown when the exit status is non-0. In that case, both stderr and stdout will be bound to the WafError object (configuration tests):: def configure(conf): out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH) (out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH) (out, err) = conf.cmd_and_log(cmd, input='\\n'.encode(), output=waflib.Context.STDOUT) try: conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH) except Errors.WafError as e: print(e.stdout, e.stderr) :param cmd: args for subprocess.Popen :type cmd: list or string :param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate. :type kw: dict :returns: a tuple containing the contents of stdout and stderr :rtype: string :raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process :raises: :py:class:`waflib.Errors.WafError` in case of execution failure; stdout/stderr/returncode are bound to the exception object """ subprocess = Utils.subprocess kw['shell'] = isinstance(cmd, str) self.log_command(cmd, kw) quiet = kw.pop('quiet', None) to_ret = kw.pop('output', STDOUT) if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]): raise Errors.WafError('Program %r not found!' 
% cmd[0]) kw['stdout'] = kw['stderr'] = subprocess.PIPE if quiet is None: self.to_log(cmd) cargs = {} if 'timeout' in kw: if sys.hexversion >= 0x3030000: cargs['timeout'] = kw['timeout'] if not 'start_new_session' in kw: kw['start_new_session'] = True del kw['timeout'] if 'input' in kw: if kw['input']: cargs['input'] = kw['input'] kw['stdin'] = subprocess.PIPE del kw['input'] if 'cwd' in kw: if not isinstance(kw['cwd'], str): kw['cwd'] = kw['cwd'].abspath() encoding = kw.pop('decode_as', default_encoding) try: ret, out, err = Utils.run_process(cmd, kw, cargs) except Exception as e: raise Errors.WafError('Execution failure: %s' % str(e), ex=e) if not isinstance(out, str): out = out.decode(encoding, errors='replace') if not isinstance(err, str): err = err.decode(encoding, errors='replace') if out and quiet != STDOUT and quiet != BOTH: self.to_log('out: %s' % out) if err and quiet != STDERR and quiet != BOTH: self.to_log('err: %s' % err) if ret: e = Errors.WafError('Command %r returned %r' % (cmd, ret)) e.returncode = ret e.stderr = err e.stdout = out raise e if to_ret == BOTH: return (out, err) elif to_ret == STDERR: return err return out def fatal(self, msg, ex=None): """ Prints an error message in red and stops command execution; this is usually used in the configuration section:: def configure(conf): conf.fatal('a requirement is missing') :param msg: message to display :type msg: string :param ex: optional exception object :type ex: exception :raises: :py:class:`waflib.Errors.ConfigurationError` """ if self.logger: self.logger.info('from %s: %s' % (self.path.abspath(), msg)) try: logfile = self.logger.handlers[0].baseFilename except AttributeError: pass else: if os.environ.get('WAF_PRINT_FAILURE_LOG'): # see #1930 msg = 'Log from (%s):\n%s\n' % (logfile, Utils.readf(logfile)) else: msg = '%s\n(complete log in %s)' % (msg, logfile) raise self.errors.ConfigurationError(msg, ex=ex) def to_log(self, msg): """ Logs information to the logger (if present), or to stderr. Empty messages are not printed:: def build(bld): bld.to_log('starting the build') Provide a logger on the context class or override this method if necessary. :param msg: message :type msg: string """ if not msg: return if self.logger: self.logger.info(msg) else: sys.stderr.write(str(msg)) sys.stderr.flush() def msg(self, *k, **kw): """ Prints a configuration message of the form ``msg: result``. The second part of the message will be in colors. The output can be disabled easily by setting ``in_msg`` to a positive value:: def configure(conf): self.in_msg = 1 conf.msg('Checking for library foo', 'ok') # no output :param msg: message to display to the user :type msg: string :param result: result to display :type result: string or boolean :param color: color to use, see :py:const:`waflib.Logs.colors_lst` :type color: string """ try: msg = kw['msg'] except KeyError: msg = k[0] self.start_msg(msg, **kw) try: result = kw['result'] except KeyError: result = k[1] color = kw.get('color') if not isinstance(color, str): color = result and 'GREEN' or 'YELLOW' self.end_msg(result, color, **kw) def start_msg(self, *k, **kw): """ Prints the beginning of a 'Checking for xxx' message. 
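A typical pairing with :py:meth:`end_msg` during configuration (the check below is only illustrative)::

	def configure(conf):
		conf.start_msg('Checking for CFLAGS in the environment')
		value = conf.environ.get('CFLAGS')
		if value:
			conf.end_msg(value)
		else:
			conf.end_msg('not set', 'YELLOW')
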
See :py:meth:`waflib.Context.Context.msg` """ if kw.get('quiet'): return msg = kw.get('msg') or k[0] try: if self.in_msg: self.in_msg += 1 return except AttributeError: self.in_msg = 0 self.in_msg += 1 try: self.line_just = max(self.line_just, len(msg)) except AttributeError: self.line_just = max(40, len(msg)) for x in (self.line_just * '-', msg): self.to_log(x) Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='') def end_msg(self, *k, **kw): """Prints the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`""" if kw.get('quiet'): return self.in_msg -= 1 if self.in_msg: return result = kw.get('result') or k[0] defcolor = 'GREEN' if result is True: msg = 'ok' elif not result: msg = 'not found' defcolor = 'YELLOW' else: msg = str(result) self.to_log(msg) try: color = kw['color'] except KeyError: if len(k) > 1 and k[1] in Logs.colors_lst: # compatibility waf 1.7 color = k[1] else: color = defcolor Logs.pprint(color, msg) def load_special_tools(self, var, ban=[]): """ Loads third-party extensions modules for certain programming languages by trying to list certain files in the extras/ directory. This method is typically called once for a programming language group, see for example :py:mod:`waflib.Tools.compiler_c` :param var: glob expression, for example 'cxx\\_\\*.py' :type var: string :param ban: list of exact file names to exclude :type ban: list of string """ if os.path.isdir(waf_dir): lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var) for x in lst: if not x.name in ban: load_tool(x.name.replace('.py', '')) else: from zipfile import PyZipFile waflibs = PyZipFile(waf_dir) lst = waflibs.namelist() for x in lst: if not re.match('waflib/extras/%s' % var.replace('*', '.*'), var): continue f = os.path.basename(x) doban = False for b in ban: r = b.replace('*', '.*') if re.match(r, f): doban = True if not doban: f = f.replace('.py', '') load_tool(f) cache_modules = {} """ Dictionary holding already loaded modules (wscript), indexed by their absolute path. The modules are added automatically by :py:func:`waflib.Context.load_module` """ def load_module(path, encoding=None): """ Loads a wscript file as a python module. 
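A hedged sketch of direct use (the path is a placeholder; most code reaches this function indirectly through :py:meth:`waflib.Context.Context.recurse`)::

	from waflib import Context
	# '/path/to/project/wscript' stands for an existing wscript file
	module = Context.load_module('/path/to/project/wscript')
	configure_fun = getattr(module, 'configure', None)
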
This method caches results in :py:attr:`waflib.Context.cache_modules` :param path: file path :type path: string :return: Loaded Python module :rtype: module """ try: return cache_modules[path] except KeyError: pass module = imp.new_module(WSCRIPT_FILE) try: code = Utils.readf(path, m='r', encoding=encoding) except EnvironmentError: raise Errors.WafError('Could not read the file %r' % path) module_dir = os.path.dirname(path) sys.path.insert(0, module_dir) try: exec(compile(code, path, 'exec'), module.__dict__) finally: sys.path.remove(module_dir) cache_modules[path] = module return module def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True): """ Imports a Waf tool as a python module, and stores it in the dict :py:const:`waflib.Context.Context.tools` :type tool: string :param tool: Name of the tool :type tooldir: list :param tooldir: List of directories to search for the tool module :type with_sys_path: boolean :param with_sys_path: whether or not to search the regular sys.path, besides waf_dir and potentially given tooldirs """ if tool == 'java': tool = 'javaw' # jython else: tool = tool.replace('++', 'xx') if not with_sys_path: back_path = sys.path sys.path = [] try: if tooldir: assert isinstance(tooldir, list) sys.path = tooldir + sys.path try: __import__(tool) except ImportError as e: e.waf_sys_path = list(sys.path) raise finally: for d in tooldir: sys.path.remove(d) ret = sys.modules[tool] Context.tools[tool] = ret return ret else: if not with_sys_path: sys.path.insert(0, waf_dir) try: for x in ('waflib.Tools.%s', 'waflib.extras.%s', 'waflib.%s', '%s'): try: __import__(x % tool) break except ImportError: x = None else: # raise an exception __import__(tool) except ImportError as e: e.waf_sys_path = list(sys.path) raise finally: if not with_sys_path: sys.path.remove(waf_dir) ret = sys.modules[x % tool] Context.tools[tool] = ret return ret finally: if not with_sys_path: sys.path += back_path ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Errors.py0000660000000000000000000000326100000000000021235 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2018 (ita) """ Exceptions used in the Waf code """ import traceback, sys class WafError(Exception): """Base class for all Waf errors""" def __init__(self, msg='', ex=None): """ :param msg: error message :type msg: string :param ex: exception causing this error (optional) :type ex: exception """ Exception.__init__(self) self.msg = msg assert not isinstance(msg, Exception) self.stack = [] if ex: if not msg: self.msg = str(ex) if isinstance(ex, WafError): self.stack = ex.stack else: self.stack = traceback.extract_tb(sys.exc_info()[2]) self.stack += traceback.extract_stack()[:-1] self.verbose_msg = ''.join(traceback.format_list(self.stack)) def __str__(self): return str(self.msg) class BuildError(WafError): """Error raised during the build and install phases""" def __init__(self, error_tasks=[]): """ :param error_tasks: tasks that could not complete normally :type error_tasks: list of task objects """ self.tasks = error_tasks WafError.__init__(self, self.format_error()) def format_error(self): """Formats the error messages from the tasks that failed""" lst = ['Build failed'] for tsk in self.tasks: txt = tsk.format_error() if txt: lst.append(txt) return '\n'.join(lst) class ConfigurationError(WafError): """Configuration exception raised in particular by :py:meth:`waflib.Context.Context.fatal`""" pass 
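# Illustrative sketch (not part of waf): configuration checks signal failure by
# raising ConfigurationError (usually via conf.fatal()), so a wscript can catch
# it to make a check optional. The program name below is hypothetical.
def _example_optional_check(conf):
	try:
		conf.find_program('some-optional-tool')
	except ConfigurationError:
		conf.env.HAVE_OPTIONAL_TOOL = False
	# equivalently: conf.find_program('some-optional-tool', mandatory=False)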
class TaskRescan(WafError): """Task-specific exception type signalling required signature recalculations""" pass class TaskNotReady(WafError): """Task-specific exception type signalling that task signatures cannot be computed""" pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Logs.py0000660000000000000000000002303300000000000020664 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ logging, colors, terminal width and pretty-print """ import os, re, traceback, sys from waflib import Utils, ansiterm if not os.environ.get('NOSYNC', False): # synchronized output is nearly mandatory to prevent garbled output if sys.stdout.isatty() and id(sys.stdout) == id(sys.__stdout__): sys.stdout = ansiterm.AnsiTerm(sys.stdout) if sys.stderr.isatty() and id(sys.stderr) == id(sys.__stderr__): sys.stderr = ansiterm.AnsiTerm(sys.stderr) # import the logging module after since it holds a reference on sys.stderr # in case someone uses the root logger import logging LOG_FORMAT = os.environ.get('WAF_LOG_FORMAT', '%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s') HOUR_FORMAT = os.environ.get('WAF_HOUR_FORMAT', '%H:%M:%S') zones = [] """ See :py:class:`waflib.Logs.log_filter` """ verbose = 0 """ Global verbosity level, see :py:func:`waflib.Logs.debug` and :py:func:`waflib.Logs.error` """ colors_lst = { 'USE' : True, 'BOLD' :'\x1b[01;1m', 'RED' :'\x1b[01;31m', 'GREEN' :'\x1b[32m', 'YELLOW':'\x1b[33m', 'PINK' :'\x1b[35m', 'BLUE' :'\x1b[01;34m', 'CYAN' :'\x1b[36m', 'GREY' :'\x1b[37m', 'NORMAL':'\x1b[0m', 'cursor_on' :'\x1b[?25h', 'cursor_off' :'\x1b[?25l', } indicator = '\r\x1b[K%s%s%s' try: unicode except NameError: unicode = None def enable_colors(use): """ If *1* is given, then the system will perform a few verifications before enabling colors, such as checking whether the interpreter is running in a terminal. A value of zero will disable colors, and a value above *1* will force colors. :param use: whether to enable colors or not :type use: integer """ if use == 1: if not (sys.stderr.isatty() or sys.stdout.isatty()): use = 0 if Utils.is_win32 and os.name != 'java': term = os.environ.get('TERM', '') # has ansiterm else: term = os.environ.get('TERM', 'dumb') if term in ('dumb', 'emacs'): use = 0 if use >= 1: os.environ['TERM'] = 'vt100' colors_lst['USE'] = use # If console packages are available, replace the dummy function with a real # implementation try: get_term_cols = ansiterm.get_term_cols except AttributeError: def get_term_cols(): return 80 get_term_cols.__doc__ = """ Returns the console width in characters. :return: the number of characters per line :rtype: int """ def get_color(cl): """ Returns the ansi sequence corresponding to the given color name. An empty string is returned when coloring is globally disabled. :param cl: color name in capital letters :type cl: string """ if colors_lst['USE']: return colors_lst.get(cl, '') return '' class color_dict(object): """attribute-based color access, eg: colors.PINK""" def __getattr__(self, a): return get_color(a) def __call__(self, a): return get_color(a) colors = color_dict() re_log = re.compile(r'(\w+): (.*)', re.M) class log_filter(logging.Filter): """ Waf logs are of the form 'name: message', and can be filtered by 'waf --zones=name'. 
For example, the following:: from waflib import Logs Logs.debug('test: here is a message') Will be displayed only when executing:: $ waf --zones=test """ def __init__(self, name=''): logging.Filter.__init__(self, name) def filter(self, rec): """ Filters log records by zone and by logging level :param rec: log entry """ rec.zone = rec.module if rec.levelno >= logging.INFO: return True m = re_log.match(rec.msg) if m: rec.zone = m.group(1) rec.msg = m.group(2) if zones: return getattr(rec, 'zone', '') in zones or '*' in zones elif not verbose > 2: return False return True class log_handler(logging.StreamHandler): """Dispatches messages to stderr/stdout depending on the severity level""" def emit(self, record): """ Delegates the functionality to :py:meth:`waflib.Log.log_handler.emit_override` """ # default implementation try: try: self.stream = record.stream except AttributeError: if record.levelno >= logging.WARNING: record.stream = self.stream = sys.stderr else: record.stream = self.stream = sys.stdout self.emit_override(record) self.flush() except (KeyboardInterrupt, SystemExit): raise except: # from the python library -_- self.handleError(record) def emit_override(self, record, **kw): """ Writes the log record to the desired stream (stderr/stdout) """ self.terminator = getattr(record, 'terminator', '\n') stream = self.stream if unicode: # python2 msg = self.formatter.format(record) fs = '%s' + self.terminator try: if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)): fs = fs.decode(stream.encoding) try: stream.write(fs % msg) except UnicodeEncodeError: stream.write((fs % msg).encode(stream.encoding)) else: stream.write(fs % msg) except UnicodeError: stream.write((fs % msg).encode('utf-8')) else: logging.StreamHandler.emit(self, record) class formatter(logging.Formatter): """Simple log formatter which handles colors""" def __init__(self): logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT) def format(self, rec): """ Formats records and adds colors as needed. The records do not get a leading hour format if the logging level is above *INFO*. 
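A small sketch of messages flowing through this formatter (actual output depends on the terminal and on ``colors_lst['USE']``)::

	from waflib import Logs
	Logs.init_log()
	Logs.info('plain informational line')   # no time/zone prefix at INFO and above
	Logs.error('something went wrong')      # typically shown in red on a colour terminal
	Logs.debug('runner: low-level detail')  # filtered by verbosity and by --zones=runner
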
""" try: msg = rec.msg.decode('utf-8') except Exception: msg = rec.msg use = colors_lst['USE'] if (use == 1 and rec.stream.isatty()) or use == 2: c1 = getattr(rec, 'c1', None) if c1 is None: c1 = '' if rec.levelno >= logging.ERROR: c1 = colors.RED elif rec.levelno >= logging.WARNING: c1 = colors.YELLOW elif rec.levelno >= logging.INFO: c1 = colors.GREEN c2 = getattr(rec, 'c2', colors.NORMAL) msg = '%s%s%s' % (c1, msg, c2) else: # remove single \r that make long lines in text files # and other terminal commands msg = re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))', '', msg) if rec.levelno >= logging.INFO: # the goal of this is to format without the leading "Logs, hour" prefix if rec.args: try: return msg % rec.args except UnicodeDecodeError: return msg.encode('utf-8') % rec.args return msg rec.msg = msg rec.c1 = colors.PINK rec.c2 = colors.NORMAL return logging.Formatter.format(self, rec) log = None """global logger for Logs.debug, Logs.error, etc""" def debug(*k, **kw): """ Wraps logging.debug and discards messages if the verbosity level :py:attr:`waflib.Logs.verbose` ≤ 0 """ if verbose: k = list(k) k[0] = k[0].replace('\n', ' ') log.debug(*k, **kw) def error(*k, **kw): """ Wrap logging.errors, adds the stack trace when the verbosity level :py:attr:`waflib.Logs.verbose` ≥ 2 """ log.error(*k, **kw) if verbose > 2: st = traceback.extract_stack() if st: st = st[:-1] buf = [] for filename, lineno, name, line in st: buf.append(' File %r, line %d, in %s' % (filename, lineno, name)) if line: buf.append(' %s' % line.strip()) if buf: log.error('\n'.join(buf)) def warn(*k, **kw): """ Wraps logging.warning """ log.warning(*k, **kw) def info(*k, **kw): """ Wraps logging.info """ log.info(*k, **kw) def init_log(): """ Initializes the logger :py:attr:`waflib.Logs.log` """ global log log = logging.getLogger('waflib') log.handlers = [] log.filters = [] hdlr = log_handler() hdlr.setFormatter(formatter()) log.addHandler(hdlr) log.addFilter(log_filter()) log.setLevel(logging.DEBUG) def make_logger(path, name): """ Creates a simple logger, which is often used to redirect the context command output:: from waflib import Logs bld.logger = Logs.make_logger('test.log', 'build') bld.check(header_name='sadlib.h', features='cxx cprogram', mandatory=False) # have the file closed immediately Logs.free_logger(bld.logger) # stop logging bld.logger = None The method finalize() of the command will try to free the logger, if any :param path: file name to write the log output to :type path: string :param name: logger name (loggers are reused) :type name: string """ logger = logging.getLogger(name) if sys.hexversion > 0x3000000: encoding = sys.stdout.encoding else: encoding = None hdlr = logging.FileHandler(path, 'w', encoding=encoding) formatter = logging.Formatter('%(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) return logger def make_mem_logger(name, to_log, size=8192): """ Creates a memory logger to avoid writing concurrently to the main logger """ from logging.handlers import MemoryHandler logger = logging.getLogger(name) hdlr = MemoryHandler(size, target=to_log) formatter = logging.Formatter('%(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.memhandler = hdlr logger.setLevel(logging.DEBUG) return logger def free_logger(logger): """ Frees the resources held by the loggers created through make_logger or make_mem_logger. This is used for file cleanup and for handler removal (logger objects are re-used). 
""" try: for x in logger.handlers: x.close() logger.removeHandler(x) except Exception: pass def pprint(col, msg, label='', sep='\n'): """ Prints messages in color immediately on stderr:: from waflib import Logs Logs.pprint('RED', 'Something bad just happened') :param col: color name to use in :py:const:`Logs.colors_lst` :type col: string :param msg: message to display :type msg: string or a value that can be printed by %s :param label: a message to add after the colored output :type label: string :param sep: a string to append at the end (line separator) :type sep: string """ info('%s%s%s %s', colors(col), msg, colors.NORMAL, label, extra={'terminator':sep}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Node.py0000660000000000000000000006162500000000000020656 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Node: filesystem structure #. Each file/folder is represented by exactly one node. #. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc. Unused class members can increase the `.wafpickle` file size sensibly. #. Node objects should never be created directly, use the methods :py:func:`Node.make_node` or :py:func:`Node.find_node` for the low-level operations #. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` must be used when a build context is present #. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass required for serialization. (:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). A reference to the context owning a node is held as *self.ctx* """ import os, re, sys, shutil from waflib import Utils, Errors exclude_regs = ''' **/*~ **/#*# **/.#* **/%*% **/._* **/*.swp **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/BitKeeper **/.git **/.git/** **/.gitignore **/.bzr **/.bzrignore **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/.arch-ids **/{arch} **/_darcs **/_darcs/** **/.intlcache **/.DS_Store''' """ Ant patterns for files and folders to exclude while doing the recursive traversal in :py:meth:`waflib.Node.Node.ant_glob` """ def ant_matcher(s, ignorecase): reflags = re.I if ignorecase else 0 ret = [] for x in Utils.to_list(s): x = x.replace('\\', '/').replace('//', '/') if x.endswith('/'): x += '**' accu = [] for k in x.split('/'): if k == '**': accu.append(k) else: k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.').replace('+', '\\+') k = '^%s$' % k try: exp = re.compile(k, flags=reflags) except Exception as e: raise Errors.WafError('Invalid pattern: %s' % k, e) else: accu.append(exp) ret.append(accu) return ret def ant_sub_filter(name, nn): ret = [] for lst in nn: if not lst: pass elif lst[0] == '**': ret.append(lst) if len(lst) > 1: if lst[1].match(name): ret.append(lst[2:]) else: ret.append([]) elif lst[0].match(name): ret.append(lst[1:]) return ret def ant_sub_matcher(name, pats): nacc = ant_sub_filter(name, pats[0]) nrej = ant_sub_filter(name, pats[1]) if [] in nrej: nacc = [] return [nacc, nrej] class Node(object): """ This class is organized in two parts: * The basic methods meant for filesystem access (compute paths, create folders, etc) * The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``) """ dict_class = dict """ Subclasses can 
provide a dict class to enable case insensitivity for example. """ __slots__ = ('name', 'parent', 'children', 'cache_abspath', 'cache_isdir') def __init__(self, name, parent): """ .. note:: Use :py:func:`Node.make_node` or :py:func:`Node.find_node` instead of calling this constructor """ self.name = name self.parent = parent if parent: if name in parent.children: raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent)) parent.children[name] = self def __setstate__(self, data): "Deserializes node information, used for persistence" self.name = data[0] self.parent = data[1] if data[2] is not None: # Issue 1480 self.children = self.dict_class(data[2]) def __getstate__(self): "Serializes node information, used for persistence" return (self.name, self.parent, getattr(self, 'children', None)) def __str__(self): """ String representation (abspath), for debugging purposes :rtype: string """ return self.abspath() def __repr__(self): """ String representation (abspath), for debugging purposes :rtype: string """ return self.abspath() def __copy__(self): """ Provided to prevent nodes from being copied :raises: :py:class:`waflib.Errors.WafError` """ raise Errors.WafError('nodes are not supposed to be copied') def read(self, flags='r', encoding='latin-1'): """ Reads and returns the contents of the file represented by this node, see :py:func:`waflib.Utils.readf`:: def build(bld): bld.path.find_node('wscript').read() :param flags: Open mode :type flags: string :param encoding: encoding value for Python3 :type encoding: string :rtype: string or bytes :return: File contents """ return Utils.readf(self.abspath(), flags, encoding) def write(self, data, flags='w', encoding='latin-1'): """ Writes data to the file represented by this node, see :py:func:`waflib.Utils.writef`:: def build(bld): bld.path.make_node('foo.txt').write('Hello, world!') :param data: data to write :type data: string :param flags: Write mode :type flags: string :param encoding: encoding value for Python3 :type encoding: string """ Utils.writef(self.abspath(), data, flags, encoding) def read_json(self, convert=True, encoding='utf-8'): """ Reads and parses the contents of this node as JSON (Python ≥ 2.6):: def build(bld): bld.path.find_node('abc.json').read_json() Note that this by default automatically decodes unicode strings on Python2, unlike what the Python JSON module does. :type convert: boolean :param convert: Prevents decoding of unicode strings on Python2 :type encoding: string :param encoding: The encoding of the file to read. 
This default to UTF8 as per the JSON standard :rtype: object :return: Parsed file contents """ import json # Python 2.6 and up object_pairs_hook = None if convert and sys.hexversion < 0x3000000: try: _type = unicode except NameError: _type = str def convert(value): if isinstance(value, list): return [convert(element) for element in value] elif isinstance(value, _type): return str(value) else: return value def object_pairs(pairs): return dict((str(pair[0]), convert(pair[1])) for pair in pairs) object_pairs_hook = object_pairs return json.loads(self.read(encoding=encoding), object_pairs_hook=object_pairs_hook) def write_json(self, data, pretty=True): """ Writes a python object as JSON to disk (Python ≥ 2.6) as UTF-8 data (JSON standard):: def build(bld): bld.path.find_node('xyz.json').write_json(199) :type data: object :param data: The data to write to disk :type pretty: boolean :param pretty: Determines if the JSON will be nicely space separated """ import json # Python 2.6 and up indent = 2 separators = (',', ': ') sort_keys = pretty newline = os.linesep if not pretty: indent = None separators = (',', ':') newline = '' output = json.dumps(data, indent=indent, separators=separators, sort_keys=sort_keys) + newline self.write(output, encoding='utf-8') def exists(self): """ Returns whether the Node is present on the filesystem :rtype: bool """ return os.path.exists(self.abspath()) def isdir(self): """ Returns whether the Node represents a folder :rtype: bool """ return os.path.isdir(self.abspath()) def chmod(self, val): """ Changes the file/dir permissions:: def build(bld): bld.path.chmod(493) # 0755 """ os.chmod(self.abspath(), val) def delete(self, evict=True): """ Removes the file/folder from the filesystem (equivalent to `rm -rf`), and remove this object from the Node tree. Do not use this object after calling this method. """ try: try: if os.path.isdir(self.abspath()): shutil.rmtree(self.abspath()) else: os.remove(self.abspath()) except OSError: if os.path.exists(self.abspath()): raise finally: if evict: self.evict() def evict(self): """ Removes this node from the Node tree """ del self.parent.children[self.name] def suffix(self): """ Returns the file rightmost extension, for example `a.b.c.d → .d` :rtype: string """ k = max(0, self.name.rfind('.')) return self.name[k:] def height(self): """ Returns the depth in the folder hierarchy from the filesystem root or from all the file drives :returns: filesystem depth :rtype: integer """ d = self val = -1 while d: d = d.parent val += 1 return val def listdir(self): """ Lists the folder contents :returns: list of file/folder names ordered alphabetically :rtype: list of string """ lst = Utils.listdir(self.abspath()) lst.sort() return lst def mkdir(self): """ Creates a folder represented by this node. Intermediate folders are created as needed. 
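For example, preparing an output folder from a build function (the folder name is illustrative)::

	def build(bld):
		out = bld.bldnode.make_node('generated/include')
		out.mkdir()   # intermediate folders are created as needed
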
:raises: :py:class:`waflib.Errors.WafError` when the folder is missing """ if self.isdir(): return try: self.parent.mkdir() except OSError: pass if self.name: try: os.makedirs(self.abspath()) except OSError: pass if not self.isdir(): raise Errors.WafError('Could not create the directory %r' % self) try: self.children except AttributeError: self.children = self.dict_class() def find_node(self, lst): """ Finds a node on the file system (files or folders), and creates the corresponding Node objects if it exists :param lst: relative path :type lst: string or list of string :returns: The corresponding Node object or None if no entry was found on the filesystem :rtype: :py:class:´waflib.Node.Node´ """ if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] if lst and lst[0].startswith('\\\\') and not self.parent: node = self.ctx.root.make_node(lst[0]) node.cache_isdir = True return node.find_node(lst[1:]) cur = self for x in lst: if x == '..': cur = cur.parent or cur continue try: ch = cur.children except AttributeError: cur.children = self.dict_class() else: try: cur = ch[x] continue except KeyError: pass # optimistic: create the node first then look if it was correct to do so cur = self.__class__(x, cur) if not cur.exists(): cur.evict() return None if not cur.exists(): cur.evict() return None return cur def make_node(self, lst): """ Returns or creates a Node object corresponding to the input path without considering the filesystem. :param lst: relative path :type lst: string or list of string :rtype: :py:class:´waflib.Node.Node´ """ if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur continue try: cur = cur.children[x] except AttributeError: cur.children = self.dict_class() except KeyError: pass else: continue cur = self.__class__(x, cur) return cur def search_node(self, lst): """ Returns a Node previously defined in the data structure. The filesystem is not considered. :param lst: relative path :type lst: string or list of string :rtype: :py:class:´waflib.Node.Node´ or None if there is no entry in the Node datastructure """ if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur else: try: cur = cur.children[x] except (AttributeError, KeyError): return None return cur def path_from(self, node): """ Path of this node seen from the other:: def build(bld): n1 = bld.path.find_node('foo/bar/xyz.txt') n2 = bld.path.find_node('foo/stuff/') n1.path_from(n2) # '../bar/xyz.txt' :param node: path to use as a reference :type node: :py:class:`waflib.Node.Node` :returns: a relative path or an absolute one if that is better :rtype: string """ c1 = self c2 = node c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while not c1 is c2: lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent if c1.parent: lst.extend(['..'] * up) lst.reverse() return os.sep.join(lst) or '.' else: return self.abspath() def abspath(self): """ Returns the absolute path. 
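For example, from a build function::

	def build(bld):
		script = bld.path.find_node('wscript')
		print(script.abspath())   # absolute, platform-specific path to the script
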
A cache is kept in the context as ``cache_node_abspath`` :rtype: string """ try: return self.cache_abspath except AttributeError: pass # think twice before touching this (performance + complexity + correctness) if not self.parent: val = os.sep elif not self.parent.name: val = os.sep + self.name else: val = self.parent.abspath() + os.sep + self.name self.cache_abspath = val return val if Utils.is_win32: def abspath(self): try: return self.cache_abspath except AttributeError: pass if not self.parent: val = '' elif not self.parent.name: val = self.name + os.sep else: val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name self.cache_abspath = val return val def is_child_of(self, node): """ Returns whether the object belongs to a subtree of the input node:: def build(bld): node = bld.path.find_node('wscript') node.is_child_of(bld.path) # True :param node: path to use as a reference :type node: :py:class:`waflib.Node.Node` :rtype: bool """ p = self diff = self.height() - node.height() while diff > 0: diff -= 1 p = p.parent return p is node def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): """ Recursive method used by :py:meth:`waflib.Node.ant_glob`. :param accept: function used for accepting/rejecting a node, returns the patterns that can be still accepted in recursion :type accept: function :param maxdepth: maximum depth in the filesystem (25) :type maxdepth: int :param pats: list of patterns to accept and list of patterns to exclude :type pats: tuple :param dir: return folders too (False by default) :type dir: bool :param src: return files (True by default) :type src: bool :param remove: remove files/folders that do not exist (True by default) :type remove: bool :param quiet: disable build directory traversal warnings (verbose mode) :type quiet: bool :returns: A generator object to iterate from :rtype: iterator """ dircont = self.listdir() try: lst = set(self.children.keys()) except AttributeError: self.children = self.dict_class() else: if remove: for x in lst - set(dircont): self.children[x].evict() for name in dircont: npats = accept(name, pats) if npats and npats[0]: accepted = [] in npats[0] node = self.make_node([name]) isdir = node.isdir() if accepted: if isdir: if dir: yield node elif src: yield node if isdir: node.cache_isdir = True if maxdepth: for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove, quiet=quiet): yield k def ant_glob(self, *k, **kw): """ Finds files across folders and returns Node objects: * ``**/*`` find all files recursively * ``**/*.class`` find all files ending by .class * ``..`` find files having two dot characters For example:: def configure(cfg): # find all .cpp files cfg.path.ant_glob('**/*.cpp') # find particular files from the root filesystem (can be slow) cfg.root.ant_glob('etc/*.txt') # simple exclusion rule example cfg.path.ant_glob('*.c*', excl=['*.c'], src=True, dir=False) For more information about the patterns, consult http://ant.apache.org/manual/dirtasks.html Please remember that the '..' sequence does not represent the parent directory:: def configure(cfg): cfg.path.ant_glob('../*.h') # incorrect cfg.path.parent.ant_glob('*.h') # correct The Node structure is itself a filesystem cache, so certain precautions must be taken while matching files in the build or installation phases. Nodes objects that do have a corresponding file or folder are garbage-collected by default. 
This garbage collection is usually required to prevent returning files that do not exist anymore. Yet, this may also remove Node objects of files that are yet-to-be built. This typically happens when trying to match files in the build directory, but there are also cases when files are created in the source directory. Run ``waf -v`` to display any warnings, and try consider passing ``remove=False`` when matching files in the build directory. Since ant_glob can traverse both source and build folders, it is a best practice to call this method only from the most specific build node:: def build(bld): # traverses the build directory, may need ``remove=False``: bld.path.ant_glob('project/dir/**/*.h') # better, no accidental build directory traversal: bld.path.find_node('project/dir').ant_glob('**/*.h') # best In addition, files and folders are listed immediately. When matching files in the build folders, consider passing ``generator=True`` so that the generator object returned can defer computation to a later stage. For example:: def build(bld): bld(rule='tar xvf ${SRC}', source='arch.tar') bld.add_group() gen = bld.bldnode.ant_glob("*.h", generator=True, remove=True) # files will be listed only after the arch.tar is unpacked bld(rule='ls ${SRC}', source=gen, name='XYZ') :param incl: ant patterns or list of patterns to include :type incl: string or list of strings :param excl: ant patterns or list of patterns to exclude :type excl: string or list of strings :param dir: return folders too (False by default) :type dir: bool :param src: return files (True by default) :type src: bool :param maxdepth: maximum depth of recursion :type maxdepth: int :param ignorecase: ignore case while matching (False by default) :type ignorecase: bool :param generator: Whether to evaluate the Nodes lazily :type generator: bool :param remove: remove files/folders that do not exist (True by default) :type remove: bool :param quiet: disable build directory traversal warnings (verbose mode) :type quiet: bool :returns: The corresponding Node objects as a list or as a generator object (generator=True) :rtype: by default, list of :py:class:`waflib.Node.Node` instances """ src = kw.get('src', True) dir = kw.get('dir') excl = kw.get('excl', exclude_regs) incl = k and k[0] or kw.get('incl', '**') remove = kw.get('remove', True) maxdepth = kw.get('maxdepth', 25) ignorecase = kw.get('ignorecase', False) quiet = kw.get('quiet', False) pats = (ant_matcher(incl, ignorecase), ant_matcher(excl, ignorecase)) if kw.get('generator'): return Utils.lazy_generator(self.ant_iter, (ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet)) it = self.ant_iter(ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet) if kw.get('flat'): # returns relative paths as a space-delimited string # prefer Node objects whenever possible return ' '.join(x.path_from(self) for x in it) return list(it) # ---------------------------------------------------------------------------- # the methods below require the source/build folders (bld.srcnode/bld.bldnode) def is_src(self): """ Returns True if the node is below the source directory. Note that ``!is_src() ≠ is_bld()`` :rtype: bool """ cur = self x = self.ctx.srcnode y = self.ctx.bldnode while cur.parent: if cur is y: return False if cur is x: return True cur = cur.parent return False def is_bld(self): """ Returns True if the node is below the build directory. 
Note that ``!is_bld() ≠ is_src()`` :rtype: bool """ cur = self y = self.ctx.bldnode while cur.parent: if cur is y: return True cur = cur.parent return False def get_src(self): """ Returns the corresponding Node object in the source directory (or self if already under the source directory). Use this method only if the purpose is to create a Node object (this is common with folders but not with files, see ticket 1937) :rtype: :py:class:`waflib.Node.Node` """ cur = self x = self.ctx.srcnode y = self.ctx.bldnode lst = [] while cur.parent: if cur is y: lst.reverse() return x.make_node(lst) if cur is x: return self lst.append(cur.name) cur = cur.parent return self def get_bld(self): """ Return the corresponding Node object in the build directory (or self if already under the build directory). Use this method only if the purpose is to create a Node object (this is common with folders but not with files, see ticket 1937) :rtype: :py:class:`waflib.Node.Node` """ cur = self x = self.ctx.srcnode y = self.ctx.bldnode lst = [] while cur.parent: if cur is y: return self if cur is x: lst.reverse() return self.ctx.bldnode.make_node(lst) lst.append(cur.name) cur = cur.parent # the file is external to the current project, make a fake root in the current build directory lst.reverse() if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'): lst[0] = lst[0][0] return self.ctx.bldnode.make_node(['__root__'] + lst) def find_resource(self, lst): """ Use this method in the build phase to find source files corresponding to the relative path given. First it looks up the Node data structure to find any declared Node object in the build directory. If None is found, it then considers the filesystem in the source directory. :param lst: relative path :type lst: string or list of string :returns: the corresponding Node object or None :rtype: :py:class:`waflib.Node.Node` """ if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if not node: node = self.get_src().find_node(lst) if node and node.isdir(): return None return node def find_or_declare(self, lst): """ Use this method in the build phase to declare output files which are meant to be written in the build directory. This method creates the Node object and its parent folder as needed. 
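A hypothetical usage sketch (file names are illustrative); the parent folder below the build directory is created automatically::

    def build(bld):
        # declares build/gen/version.h and creates the gen/ folder if needed
        hdr = bld.path.find_or_declare('gen/version.h')
        bld(rule='echo "#define VERSION 1" > ${TGT}', target=hdr)
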
:param lst: relative path :type lst: string or list of string """ if isinstance(lst, str) and os.path.isabs(lst): node = self.ctx.root.make_node(lst) else: node = self.get_bld().make_node(lst) node.parent.mkdir() return node def find_dir(self, lst): """ Searches for a folder on the filesystem (see :py:meth:`waflib.Node.Node.find_node`) :param lst: relative path :type lst: string or list of string :returns: The corresponding Node object or None if there is no such folder :rtype: :py:class:`waflib.Node.Node` """ if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.find_node(lst) if node and not node.isdir(): return None return node # helpers for building things def change_ext(self, ext, ext_in=None): """ Declares a build node with a distinct extension; this is uses :py:meth:`waflib.Node.Node.find_or_declare` :return: A build node of the same path, but with a different extension :rtype: :py:class:`waflib.Node.Node` """ name = self.name if ext_in is None: k = name.rfind('.') if k >= 0: name = name[:k] + ext else: name = name + ext else: name = name[:- len(ext_in)] + ext return self.parent.find_or_declare([name]) def bldpath(self): """ Returns the relative path seen from the build directory ``src/foo.cpp`` :rtype: string """ return self.path_from(self.ctx.bldnode) def srcpath(self): """ Returns the relative path seen from the source directory ``../src/foo.cpp`` :rtype: string """ return self.path_from(self.ctx.srcnode) def relpath(self): """ If a file in the build directory, returns :py:meth:`waflib.Node.Node.bldpath`, else returns :py:meth:`waflib.Node.Node.srcpath` :rtype: string """ cur = self x = self.ctx.bldnode while cur.parent: if cur is x: return self.bldpath() cur = cur.parent return self.srcpath() def bld_dir(self): """ Equivalent to self.parent.bldpath() :rtype: string """ return self.parent.bldpath() def h_file(self): """ See :py:func:`waflib.Utils.h_file` :return: a hash representing the file contents :rtype: string or bytes """ return Utils.h_file(self.abspath()) def get_bld_sig(self): """ Returns a signature (see :py:meth:`waflib.Node.Node.h_file`) for the purpose of build dependency calculation. This method uses a per-context cache. :return: a hash representing the object contents :rtype: string or bytes """ # previous behaviour can be set by returning self.ctx.node_sigs[self] when a build node try: cache = self.ctx.cache_sig except AttributeError: cache = self.ctx.cache_sig = {} try: ret = cache[self] except KeyError: p = self.abspath() try: ret = cache[self] = self.h_file() except EnvironmentError: if self.isdir(): # allow folders as build nodes, do not use the creation time st = os.stat(p) ret = cache[self] = Utils.h_list([p, st.st_ino, st.st_mode]) return ret raise return ret pickle_lock = Utils.threading.Lock() """Lock mandatory for thread-safe node serialization""" class Nod3(Node): """Mandatory subclass for thread-safe node serialization""" pass # do not remove ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Options.py0000660000000000000000000002726600000000000021427 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Scott Newton, 2005 (scottn) # Thomas Nagy, 2006-2018 (ita) """ Support for waf command-line options Provides default and command-line options, as well the command that reads the ``options`` wscript function. 
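A minimal wscript sketch showing how a custom option is declared and later read back; the ``--foo`` flag is purely illustrative::

    def options(opt):
        opt.add_option('--foo', action='store', default='bar', help='an illustrative option')

    def configure(conf):
        # parsed values are exposed on every context as ctx.options
        print(conf.options.foo)
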
""" import os, tempfile, optparse, sys, re from waflib import Logs, Utils, Context, Errors options = optparse.Values() """ A global dictionary representing user-provided command-line options:: $ waf --foo=bar """ commands = [] """ List of commands to execute extracted from the command-line. This list is consumed during the execution by :py:func:`waflib.Scripting.run_commands`. """ envvars = [] """ List of environment variable declarations placed after the Waf executable name. These are detected by searching for "=" in the remaining arguments. You probably do not want to use this. """ lockfile = os.environ.get('WAFLOCK', '.lock-waf_%s_build' % sys.platform) """ Name of the lock file that marks a project as configured """ class opt_parser(optparse.OptionParser): """ Command-line options parser. """ def __init__(self, ctx, allow_unknown=False): optparse.OptionParser.__init__(self, conflict_handler='resolve', add_help_option=False, version='%s %s (%s)' % (Context.WAFNAME, Context.WAFVERSION, Context.WAFREVISION)) self.formatter.width = Logs.get_term_cols() self.ctx = ctx self.allow_unknown = allow_unknown def _process_args(self, largs, rargs, values): """ Custom _process_args to allow unknown options according to the allow_unknown status """ while rargs: try: optparse.OptionParser._process_args(self,largs,rargs,values) except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e: if self.allow_unknown: largs.append(e.opt_str) else: self.error(str(e)) def _process_long_opt(self, rargs, values): # --custom-option=-ftxyz is interpreted as -f -t... see #2280 if self.allow_unknown: back = [] + rargs try: optparse.OptionParser._process_long_opt(self, rargs, values) except optparse.BadOptionError: while rargs: rargs.pop() rargs.extend(back) rargs.pop(0) raise else: optparse.OptionParser._process_long_opt(self, rargs, values) def print_usage(self, file=None): return self.print_help(file) def get_usage(self): """ Builds the message to print on ``waf --help`` :rtype: string """ cmds_str = {} for cls in Context.classes: if not cls.cmd or cls.cmd == 'options' or cls.cmd.startswith( '_' ): continue s = cls.__doc__ or '' cmds_str[cls.cmd] = s if Context.g_module: for (k, v) in Context.g_module.__dict__.items(): if k in ('options', 'init', 'shutdown'): continue if type(v) is type(Context.create_context): if v.__doc__ and not k.startswith('_'): cmds_str[k] = v.__doc__ just = 0 for k in cmds_str: just = max(just, len(k)) lst = [' %s: %s' % (k.ljust(just), v) for (k, v) in cmds_str.items()] lst.sort() ret = '\n'.join(lst) return '''%s [commands] [options] Main commands (example: ./%s build -j4) %s ''' % (Context.WAFNAME, Context.WAFNAME, ret) class OptionsContext(Context.Context): """ Collects custom options from wscript files and parses the command line. Sets the global :py:const:`waflib.Options.commands` and :py:const:`waflib.Options.options` values. 
""" cmd = 'options' fun = 'options' def __init__(self, **kw): super(OptionsContext, self).__init__(**kw) self.parser = opt_parser(self) """Instance of :py:class:`waflib.Options.opt_parser`""" self.option_groups = {} jobs = self.jobs() p = self.add_option color = os.environ.get('NOCOLOR', '') and 'no' or 'auto' if os.environ.get('CLICOLOR', '') == '0': color = 'no' elif os.environ.get('CLICOLOR_FORCE', '') == '1': color = 'yes' p('-c', '--color', dest='colors', default=color, action='store', help='whether to use colors (yes/no/auto) [default: auto]', choices=('yes', 'no', 'auto')) p('-j', '--jobs', dest='jobs', default=jobs, type='int', help='amount of parallel jobs (%r)' % jobs) p('-k', '--keep', dest='keep', default=0, action='count', help='continue despite errors (-kk to try harder)') p('-v', '--verbose', dest='verbose', default=0, action='count', help='verbosity level -v -vv or -vvv [default: 0]') p('--zones', dest='zones', default='', action='store', help='debugging zones (task_gen, deps, tasks, etc)') p('--profile', dest='profile', default=0, action='store_true', help=optparse.SUPPRESS_HELP) p('--pdb', dest='pdb', default=0, action='store_true', help=optparse.SUPPRESS_HELP) p('-h', '--help', dest='whelp', default=0, action='store_true', help="show this help message and exit") gr = self.add_option_group('Configuration options') self.option_groups['configure options'] = gr gr.add_option('-o', '--out', action='store', default='', help='build dir for the project', dest='out') gr.add_option('-t', '--top', action='store', default='', help='src dir for the project', dest='top') gr.add_option('--no-lock-in-run', action='store_true', default=os.environ.get('NO_LOCK_IN_RUN', ''), help=optparse.SUPPRESS_HELP, dest='no_lock_in_run') gr.add_option('--no-lock-in-out', action='store_true', default=os.environ.get('NO_LOCK_IN_OUT', ''), help=optparse.SUPPRESS_HELP, dest='no_lock_in_out') gr.add_option('--no-lock-in-top', action='store_true', default=os.environ.get('NO_LOCK_IN_TOP', ''), help=optparse.SUPPRESS_HELP, dest='no_lock_in_top') default_prefix = getattr(Context.g_module, 'default_prefix', os.environ.get('PREFIX')) if not default_prefix: if Utils.unversioned_sys_platform() == 'win32': d = tempfile.gettempdir() default_prefix = d[0].upper() + d[1:] # win32 preserves the case, but gettempdir does not else: default_prefix = '/usr/local/' gr.add_option('--prefix', dest='prefix', default=default_prefix, help='installation prefix [default: %r]' % default_prefix) gr.add_option('--bindir', dest='bindir', help='bindir') gr.add_option('--libdir', dest='libdir', help='libdir') gr = self.add_option_group('Build and installation options') self.option_groups['build and install options'] = gr gr.add_option('-p', '--progress', dest='progress_bar', default=0, action='count', help= '-p: progress bar; -pp: ide output') gr.add_option('--targets', dest='targets', default='', action='store', help='task generators, e.g. "target1,target2"') gr = self.add_option_group('Step options') self.option_groups['step options'] = gr gr.add_option('--files', dest='files', default='', action='store', help='files to process, by regexp, e.g. 
"*/main.c,*/test/main.o"') default_destdir = os.environ.get('DESTDIR', '') gr = self.add_option_group('Installation and uninstallation options') self.option_groups['install/uninstall options'] = gr gr.add_option('--destdir', help='installation root [default: %r]' % default_destdir, default=default_destdir, dest='destdir') gr.add_option('-f', '--force', dest='force', default=False, action='store_true', help='force file installation') gr.add_option('--distcheck-args', metavar='ARGS', help='arguments to pass to distcheck', default=None, action='store') def jobs(self): """ Finds the optimal amount of cpu cores to use for parallel jobs. At runtime the options can be obtained from :py:const:`waflib.Options.options` :: from waflib.Options import options njobs = options.jobs :return: the amount of cpu cores :rtype: int """ count = int(os.environ.get('JOBS', 0)) if count < 1: if 'NUMBER_OF_PROCESSORS' in os.environ: # on Windows, use the NUMBER_OF_PROCESSORS environment variable count = int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) else: # on everything else, first try the POSIX sysconf values if hasattr(os, 'sysconf_names'): if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: count = int(os.sysconf('SC_NPROCESSORS_ONLN')) elif 'SC_NPROCESSORS_CONF' in os.sysconf_names: count = int(os.sysconf('SC_NPROCESSORS_CONF')) if not count and os.name not in ('nt', 'java'): try: tmp = self.cmd_and_log(['sysctl', '-n', 'hw.ncpu'], quiet=0) except Errors.WafError: pass else: if re.match('^[0-9]+$', tmp): count = int(tmp) if count < 1: count = 1 elif count > 1024: count = 1024 return count def add_option(self, *k, **kw): """ Wraps ``optparse.add_option``:: def options(ctx): ctx.add_option('-u', '--use', dest='use', default=False, action='store_true', help='a boolean option') :rtype: optparse option object """ return self.parser.add_option(*k, **kw) def add_option_group(self, *k, **kw): """ Wraps ``optparse.add_option_group``:: def options(ctx): gr = ctx.add_option_group('some options') gr.add_option('-u', '--use', dest='use', default=False, action='store_true') :rtype: optparse option group object """ try: gr = self.option_groups[k[0]] except KeyError: gr = self.parser.add_option_group(*k, **kw) self.option_groups[k[0]] = gr return gr def get_option_group(self, opt_str): """ Wraps ``optparse.get_option_group``:: def options(ctx): gr = ctx.get_option_group('configure options') gr.add_option('-o', '--out', action='store', default='', help='build dir for the project', dest='out') :rtype: optparse option group object """ try: return self.option_groups[opt_str] except KeyError: for group in self.parser.option_groups: if group.title == opt_str: return group return None def sanitize_path(self, path, cwd=None): if not cwd: cwd = Context.launch_dir p = os.path.expanduser(path) p = os.path.join(cwd, p) p = os.path.normpath(p) p = os.path.abspath(p) return p def parse_cmd_args(self, _args=None, cwd=None, allow_unknown=False): """ Just parse the arguments """ self.parser.allow_unknown = allow_unknown (options, leftover_args) = self.parser.parse_args(args=_args) envvars = [] commands = [] for arg in leftover_args: if '=' in arg: envvars.append(arg) elif arg != 'options': commands.append(arg) if options.jobs < 1: options.jobs = 1 for name in 'top out destdir prefix bindir libdir'.split(): # those paths are usually expanded from Context.launch_dir if getattr(options, name, None): path = self.sanitize_path(getattr(options, name), cwd) setattr(options, name, path) return options, commands, envvars def init_module_vars(self, 
arg_options, arg_commands, arg_envvars): options.__dict__.clear() del commands[:] del envvars[:] options.__dict__.update(arg_options.__dict__) commands.extend(arg_commands) envvars.extend(arg_envvars) for var in envvars: (name, value) = var.split('=', 1) os.environ[name.strip()] = value def init_logs(self, options, commands, envvars): Logs.verbose = options.verbose if options.verbose >= 1: self.load('errcheck') colors = {'yes' : 2, 'auto' : 1, 'no' : 0}[options.colors] Logs.enable_colors(colors) if options.zones: Logs.zones = options.zones.split(',') if not Logs.verbose: Logs.verbose = 1 elif Logs.verbose > 0: Logs.zones = ['runner'] if Logs.verbose > 2: Logs.zones = ['*'] def parse_args(self, _args=None): """ Parses arguments from a list which is not necessarily the command-line. Initializes the module variables options, commands and envvars If help is requested, prints it and exit the application :param _args: arguments :type _args: list of strings """ options, commands, envvars = self.parse_cmd_args() self.init_logs(options, commands, envvars) self.init_module_vars(options, commands, envvars) def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ super(OptionsContext, self).execute() self.parse_args() Utils.alloc_process_pool(options.jobs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Runner.py0000660000000000000000000004001000000000000021223 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Runner.py: Task scheduling and execution """ import heapq, traceback try: from queue import Queue, PriorityQueue except ImportError: from Queue import Queue try: from Queue import PriorityQueue except ImportError: class PriorityQueue(Queue): def _init(self, maxsize): self.maxsize = maxsize self.queue = [] def _put(self, item): heapq.heappush(self.queue, item) def _get(self): return heapq.heappop(self.queue) from waflib import Utils, Task, Errors, Logs GAP = 5 """ Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run """ class PriorityTasks(object): def __init__(self): self.lst = [] def __len__(self): return len(self.lst) def __iter__(self): return iter(self.lst) def __str__(self): return 'PriorityTasks: [%s]' % '\n '.join(str(x) for x in self.lst) def clear(self): self.lst = [] def append(self, task): heapq.heappush(self.lst, task) def appendleft(self, task): "Deprecated, do not use" heapq.heappush(self.lst, task) def pop(self): return heapq.heappop(self.lst) def extend(self, lst): if self.lst: for x in lst: self.append(x) else: if isinstance(lst, list): self.lst = lst heapq.heapify(lst) else: self.lst = lst.lst class Consumer(Utils.threading.Thread): """ Daemon thread object that executes a task. It shares a semaphore with the coordinator :py:class:`waflib.Runner.Spawner`. There is one instance per task to consume. 
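The coordination between the spawner and its consumers can be illustrated with a generic bounded-semaphore pattern (standalone sketch, not the actual Waf classes)::

    import threading

    sem = threading.Semaphore(4)                 # at most 4 workers at a time

    def consume(job):
        try:
            job()                                # run one task
        finally:
            sem.release()                        # free a slot for the spawner

    def spawn(jobs):
        for job in jobs:
            sem.acquire()                        # blocks while all 4 slots are taken
            threading.Thread(target=consume, args=(job,), daemon=True).start()
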
""" def __init__(self, spawner, task): Utils.threading.Thread.__init__(self) self.task = task """Task to execute""" self.spawner = spawner """Coordinator object""" self.setDaemon(1) self.start() def run(self): """ Processes a single task """ try: if not self.spawner.master.stop: self.spawner.master.process_task(self.task) finally: self.spawner.sem.release() self.spawner.master.out.put(self.task) self.task = None self.spawner = None class Spawner(Utils.threading.Thread): """ Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each :py:class:`waflib.Task.Task` instance. """ def __init__(self, master): Utils.threading.Thread.__init__(self) self.master = master """:py:class:`waflib.Runner.Parallel` producer instance""" self.sem = Utils.threading.Semaphore(master.numjobs) """Bounded semaphore that prevents spawning more than *n* concurrent consumers""" self.setDaemon(1) self.start() def run(self): """ Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop` """ try: self.loop() except Exception: # Python 2 prints unnecessary messages when shutting down # we also want to stop the thread properly pass def loop(self): """ Consumes task objects from the producer; ends when the producer has no more task to provide. """ master = self.master while 1: task = master.ready.get() self.sem.acquire() if not master.stop: task.log_display(task.generator.bld) Consumer(self, task) class Parallel(object): """ Schedule the tasks obtained from the build context for execution. """ def __init__(self, bld, j=2): """ The initialization requires a build context reference for computing the total number of jobs. """ self.numjobs = j """ Amount of parallel consumers to use """ self.bld = bld """ Instance of :py:class:`waflib.Build.BuildContext` """ self.outstanding = PriorityTasks() """Heap of :py:class:`waflib.Task.Task` that may be ready to be executed""" self.postponed = PriorityTasks() """Heap of :py:class:`waflib.Task.Task` which are not ready to run for non-DAG reasons""" self.incomplete = set() """List of :py:class:`waflib.Task.Task` waiting for dependent tasks to complete (DAG)""" self.ready = PriorityQueue(0) """List of :py:class:`waflib.Task.Task` ready to be executed by consumers""" self.out = Queue(0) """List of :py:class:`waflib.Task.Task` returned by the task consumers""" self.count = 0 """Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`""" self.processed = 0 """Amount of tasks processed""" self.stop = False """Error flag to stop the build""" self.error = [] """Tasks that could not be executed""" self.biter = None """Task iterator which must give groups of parallelizable tasks when calling ``next()``""" self.dirty = False """ Flag that indicates that the build cache must be saved when a task was executed (calls :py:meth:`waflib.Build.BuildContext.store`)""" self.revdeps = Utils.defaultdict(set) """ The reverse dependency graph of dependencies obtained from Task.run_after """ self.spawner = None """ Coordinating daemon thread that spawns thread consumers """ if self.numjobs > 1: self.spawner = Spawner(self) def get_next_task(self): """ Obtains the next Task instance to run :rtype: :py:class:`waflib.Task.Task` """ if not self.outstanding: return None return self.outstanding.pop() def postpone(self, tsk): """ Adds the task to the list :py:attr:`waflib.Runner.Parallel.postponed`. 
The order is scrambled so as to consume as many tasks in parallel as possible. :param tsk: task instance :type tsk: :py:class:`waflib.Task.Task` """ self.postponed.append(tsk) def refill_task_list(self): """ Pulls a next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`. Ensures that all tasks in the current build group are complete before processing the next one. """ while self.count > self.numjobs * GAP: self.get_out() while not self.outstanding: if self.count: self.get_out() if self.outstanding: break elif self.postponed: try: cond = self.deadlock == self.processed except AttributeError: pass else: if cond: # The most common reason is conflicting build order declaration # for example: "X run_after Y" and "Y run_after X" # Another can be changing "run_after" dependencies while the build is running # for example: updating "tsk.run_after" in the "runnable_status" method lst = [] for tsk in self.postponed: deps = [id(x) for x in tsk.run_after if not x.hasrun] lst.append('%s\t-> %r' % (repr(tsk), deps)) if not deps: lst.append('\n task %r dependencies are done, check its *runnable_status*?' % id(tsk)) raise Errors.WafError('Deadlock detected: check the task build order%s' % ''.join(lst)) self.deadlock = self.processed if self.postponed: self.outstanding.extend(self.postponed) self.postponed.clear() elif not self.count: if self.incomplete: for x in self.incomplete: for k in x.run_after: if not k.hasrun: break else: # dependency added after the build started without updating revdeps self.incomplete.remove(x) self.outstanding.append(x) break else: if self.stop or self.error: break raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete) else: tasks = next(self.biter) ready, waiting = self.prio_and_split(tasks) self.outstanding.extend(ready) self.incomplete.update(waiting) self.total = self.bld.total() break def add_more_tasks(self, tsk): """ If a task provides :py:attr:`waflib.Task.Task.more_tasks`, then the tasks contained in that list are added to the current build and will be processed before the next build group. The priorities for dependent tasks are not re-calculated globally :param tsk: task instance :type tsk: :py:attr:`waflib.Task.Task` """ if getattr(tsk, 'more_tasks', None): more = set(tsk.more_tasks) groups_done = set() def iteri(a, b): for x in a: yield x for x in b: yield x # Update the dependency tree # this assumes that task.run_after values were updated for x in iteri(self.outstanding, self.incomplete): for k in x.run_after: if isinstance(k, Task.TaskGroup): if k not in groups_done: groups_done.add(k) for j in k.prev & more: self.revdeps[j].add(k) elif k in more: self.revdeps[k].add(x) ready, waiting = self.prio_and_split(tsk.more_tasks) self.outstanding.extend(ready) self.incomplete.update(waiting) self.total += len(tsk.more_tasks) def mark_finished(self, tsk): def try_unfreeze(x): # DAG ancestors are likely to be in the incomplete set # This assumes that the run_after contents have not changed # after the build starts, else a deadlock may occur if x in self.incomplete: # TODO remove dependencies to free some memory? # x.run_after.remove(tsk) for k in x.run_after: if not k.hasrun: break else: self.incomplete.remove(x) self.outstanding.append(x) if tsk in self.revdeps: for x in self.revdeps[tsk]: if isinstance(x, Task.TaskGroup): x.prev.remove(tsk) if not x.prev: for k in x.next: # TODO necessary optimization? k.run_after.remove(x) try_unfreeze(k) # TODO necessary optimization? 
x.next = [] else: try_unfreeze(x) del self.revdeps[tsk] if hasattr(tsk, 'semaphore'): sem = tsk.semaphore try: sem.release(tsk) except KeyError: # TODO pass else: while sem.waiting and not sem.is_locked(): # take a frozen task, make it ready to run x = sem.waiting.pop() self._add_task(x) def get_out(self): """ Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution. Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`. :rtype: :py:attr:`waflib.Task.Task` """ tsk = self.out.get() if not self.stop: self.add_more_tasks(tsk) self.mark_finished(tsk) self.count -= 1 self.dirty = True return tsk def add_task(self, tsk): """ Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them. :param tsk: task instance :type tsk: :py:attr:`waflib.Task.Task` """ # TODO change in waf 2.1 self.ready.put(tsk) def _add_task(self, tsk): if hasattr(tsk, 'semaphore'): sem = tsk.semaphore try: sem.acquire(tsk) except IndexError: sem.waiting.add(tsk) return self.count += 1 self.processed += 1 if self.numjobs == 1: tsk.log_display(tsk.generator.bld) try: self.process_task(tsk) finally: self.out.put(tsk) else: self.add_task(tsk) def process_task(self, tsk): """ Processes a task and attempts to stop the build in case of errors """ tsk.process() if tsk.hasrun != Task.SUCCESS: self.error_handler(tsk) def skip(self, tsk): """ Mark a task as skipped/up-to-date """ tsk.hasrun = Task.SKIPPED self.mark_finished(tsk) def cancel(self, tsk): """ Mark a task as failed because of unsatisfiable dependencies """ tsk.hasrun = Task.CANCELED self.mark_finished(tsk) def error_handler(self, tsk): """ Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, unless the build is executed with:: $ waf build -k :param tsk: task instance :type tsk: :py:attr:`waflib.Task.Task` """ if not self.bld.keep: self.stop = True self.error.append(tsk) def task_status(self, tsk): """ Obtains the task status to decide whether to run it immediately or not. :return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER` :rtype: integer """ try: return tsk.runnable_status() except Exception: self.processed += 1 tsk.err_msg = traceback.format_exc() if not self.stop and self.bld.keep: self.skip(tsk) if self.bld.keep == 1: # if -k stop on the first exception, if -kk try to go as far as possible if Logs.verbose > 1 or not self.error: self.error.append(tsk) self.stop = True else: if Logs.verbose > 1: self.error.append(tsk) return Task.EXCEPTION tsk.hasrun = Task.EXCEPTION self.error_handler(tsk) return Task.EXCEPTION def start(self): """ Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to :py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out` and marks the build as failed by setting the ``stop`` flag. If only one job is used, then executes the tasks one by one, without consumers. 
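A condensed view of how the runnable status of each task drives this loop (illustrative simplification of the method below)::

    st = self.task_status(tsk)
    if st == Task.RUN_ME:
        self._add_task(tsk)       # hand the task to a consumer (or run it inline with -j1)
    elif st == Task.ASK_LATER:
        self.postpone(tsk)        # dependencies are not ready yet
    elif st == Task.SKIP_ME:
        self.skip(tsk)            # the target is up to date
    elif st == Task.CANCEL_ME:
        self.cancel(tsk)          # unsatisfiable dependency, typically under waf -k
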
""" self.total = self.bld.total() while not self.stop: self.refill_task_list() # consider the next task tsk = self.get_next_task() if not tsk: if self.count: # tasks may add new ones after they are run continue else: # no tasks to run, no tasks running, time to exit break if tsk.hasrun: # if the task is marked as "run", just skip it self.processed += 1 continue if self.stop: # stop immediately after a failure is detected break st = self.task_status(tsk) if st == Task.RUN_ME: self._add_task(tsk) elif st == Task.ASK_LATER: self.postpone(tsk) elif st == Task.SKIP_ME: self.processed += 1 self.skip(tsk) self.add_more_tasks(tsk) elif st == Task.CANCEL_ME: # A dependency problem has occurred, and the # build is most likely run with `waf -k` if Logs.verbose > 1: self.error.append(tsk) self.processed += 1 self.cancel(tsk) # self.count represents the tasks that have been made available to the consumer threads # collect all the tasks after an error else the message may be incomplete while self.error and self.count: self.get_out() self.ready.put(None) if not self.stop: assert not self.count assert not self.postponed assert not self.incomplete def prio_and_split(self, tasks): """ Label input tasks with priority values, and return a pair containing the tasks that are ready to run and the tasks that are necessarily waiting for other tasks to complete. The priority system is really meant as an optional layer for optimization: dependency cycles are found quickly, and builds should be more efficient. A high priority number means that a task is processed first. This method can be overridden to disable the priority system:: def prio_and_split(self, tasks): return tasks, [] :return: A pair of task lists :rtype: tuple """ # to disable: #return tasks, [] for x in tasks: x.visited = 0 reverse = self.revdeps groups_done = set() for x in tasks: for k in x.run_after: if isinstance(k, Task.TaskGroup): if k not in groups_done: groups_done.add(k) for j in k.prev: reverse[j].add(k) else: reverse[k].add(x) # the priority number is not the tree depth def visit(n): if isinstance(n, Task.TaskGroup): return sum(visit(k) for k in n.next) if n.visited == 0: n.visited = 1 if n in reverse: rev = reverse[n] n.prio_order = n.tree_weight + len(rev) + sum(visit(k) for k in rev) else: n.prio_order = n.tree_weight n.visited = 2 elif n.visited == 1: raise Errors.WafError('Dependency cycle found!') return n.prio_order for x in tasks: if x.visited != 0: # must visit all to detect cycles continue try: visit(x) except Errors.WafError: self.debug_cycles(tasks, reverse) ready = [] waiting = [] for x in tasks: for k in x.run_after: if not k.hasrun: waiting.append(x) break else: ready.append(x) return (ready, waiting) def debug_cycles(self, tasks, reverse): tmp = {} for x in tasks: tmp[x] = 0 def visit(n, acc): if isinstance(n, Task.TaskGroup): for k in n.next: visit(k, acc) return if tmp[n] == 0: tmp[n] = 1 for k in reverse.get(n, []): visit(k, [n] + acc) tmp[n] = 2 elif tmp[n] == 1: lst = [] for tsk in acc: lst.append(repr(tsk)) if tsk is n: # exclude prior nodes, we want the minimum cycle break raise Errors.WafError('Task dependency cycle in "run_after" constraints: %s' % ''.join(lst)) for x in tasks: visit(x, []) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Scripting.py0000660000000000000000000004037400000000000021731 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) "Module called for 
configuring, compiling and installing targets" from __future__ import with_statement import os, shlex, shutil, traceback, errno, sys, stat from waflib import Utils, Configure, Logs, Options, ConfigSet, Context, Errors, Build, Node build_dir_override = None no_climb_commands = ['configure'] default_cmd = "build" def waf_entry_point(current_directory, version, wafdir): """ This is the main entry point, all Waf execution starts here. :param current_directory: absolute path representing the current directory :type current_directory: string :param version: version number :type version: string :param wafdir: absolute path representing the directory of the waf library :type wafdir: string """ Logs.init_log() if Context.WAFVERSION != version: Logs.error('Waf script %r and library %r do not match (directory %r)', version, Context.WAFVERSION, wafdir) sys.exit(1) # Store current directory before any chdir Context.waf_dir = wafdir Context.run_dir = Context.launch_dir = current_directory start_dir = current_directory no_climb = os.environ.get('NOCLIMB') if len(sys.argv) > 1: # os.path.join handles absolute paths # if sys.argv[1] is not an absolute path, then it is relative to the current working directory potential_wscript = os.path.join(current_directory, sys.argv[1]) if os.path.basename(potential_wscript) == Context.WSCRIPT_FILE and os.path.isfile(potential_wscript): # need to explicitly normalize the path, as it may contain extra '/.' path = os.path.normpath(os.path.dirname(potential_wscript)) start_dir = os.path.abspath(path) no_climb = True sys.argv.pop(1) ctx = Context.create_context('options') (options, commands, env) = ctx.parse_cmd_args(allow_unknown=True) if options.top: start_dir = Context.run_dir = Context.top_dir = options.top no_climb = True if options.out: Context.out_dir = options.out # if 'configure' is in the commands, do not search any further if not no_climb: for k in no_climb_commands: for y in commands: if y.startswith(k): no_climb = True break # try to find a lock file (if the project was configured) # at the same time, store the first wscript file seen cur = start_dir while cur: try: lst = os.listdir(cur) except OSError: lst = [] Logs.error('Directory %r is unreadable!', cur) if Options.lockfile in lst: env = ConfigSet.ConfigSet() try: env.load(os.path.join(cur, Options.lockfile)) ino = os.stat(cur)[stat.ST_INO] except EnvironmentError: pass else: # check if the folder was not moved for x in (env.run_dir, env.top_dir, env.out_dir): if not x: continue if Utils.is_win32: if cur == x: load = True break else: # if the filesystem features symlinks, compare the inode numbers try: ino2 = os.stat(x)[stat.ST_INO] except OSError: pass else: if ino == ino2: load = True break else: Logs.warn('invalid lock file in %s', cur) load = False if load: Context.run_dir = env.run_dir Context.top_dir = env.top_dir Context.out_dir = env.out_dir break if not Context.run_dir: if Context.WSCRIPT_FILE in lst: Context.run_dir = cur next = os.path.dirname(cur) if next == cur: break cur = next if no_climb: break wscript = os.path.normpath(os.path.join(Context.run_dir, Context.WSCRIPT_FILE)) if not os.path.exists(wscript): if options.whelp: Logs.warn('These are the generic options (no wscript/project found)') ctx.parser.print_help() sys.exit(0) Logs.error('Waf: Run from a folder containing a %r file (or try -h for the generic options)', Context.WSCRIPT_FILE) sys.exit(1) try: os.chdir(Context.run_dir) except OSError: Logs.error('Waf: The folder %r is unreadable', Context.run_dir) sys.exit(1) try: 
set_main_module(wscript) except Errors.WafError as e: Logs.pprint('RED', e.verbose_msg) Logs.error(str(e)) sys.exit(1) except Exception as e: Logs.error('Waf: The wscript in %r is unreadable', Context.run_dir) traceback.print_exc(file=sys.stdout) sys.exit(2) if options.profile: import cProfile, pstats cProfile.runctx('from waflib import Scripting; Scripting.run_commands()', {}, {}, 'profi.txt') p = pstats.Stats('profi.txt') p.sort_stats('time').print_stats(75) # or 'cumulative' else: try: try: run_commands() except: if options.pdb: import pdb type, value, tb = sys.exc_info() traceback.print_exc() pdb.post_mortem(tb) else: raise except Errors.WafError as e: if Logs.verbose > 1: Logs.pprint('RED', e.verbose_msg) Logs.error(e.msg) sys.exit(1) except SystemExit: raise except Exception as e: traceback.print_exc(file=sys.stdout) sys.exit(2) except KeyboardInterrupt: Logs.pprint('RED', 'Interrupted') sys.exit(68) def set_main_module(file_path): """ Read the main wscript file into :py:const:`waflib.Context.Context.g_module` and bind default functions such as ``init``, ``dist``, ``distclean`` if not defined. Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization. :param file_path: absolute path representing the top-level wscript file :type file_path: string """ Context.g_module = Context.load_module(file_path) Context.g_module.root_path = file_path # note: to register the module globally, use the following: # sys.modules['wscript_main'] = g_module def set_def(obj): name = obj.__name__ if not name in Context.g_module.__dict__: setattr(Context.g_module, name, obj) for k in (dist, distclean, distcheck): set_def(k) # add dummy init and shutdown functions if they're not defined if not 'init' in Context.g_module.__dict__: Context.g_module.init = Utils.nada if not 'shutdown' in Context.g_module.__dict__: Context.g_module.shutdown = Utils.nada if not 'options' in Context.g_module.__dict__: Context.g_module.options = Utils.nada def parse_options(): """ Parses the command-line options and initialize the logging system. Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization. """ ctx = Context.create_context('options') ctx.execute() if not Options.commands: if isinstance(default_cmd, list): Options.commands.extend(default_cmd) else: Options.commands.append(default_cmd) if Options.options.whelp: ctx.parser.print_help() sys.exit(0) def run_command(cmd_name): """ Executes a single Waf command. Called by :py:func:`waflib.Scripting.run_commands`. :param cmd_name: command to execute, like ``build`` :type cmd_name: string """ ctx = Context.create_context(cmd_name) ctx.log_timer = Utils.Timer() ctx.options = Options.options # provided for convenience ctx.cmd = cmd_name try: ctx.execute() finally: # Issue 1374 ctx.finalize() return ctx def run_commands(): """ Execute the Waf commands that were given on the command-line, and the other options Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization, and executed after :py:func:`waflib.Scripting.parse_options`. 
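For instance, ``./waf configure build`` roughly expands into the following sequence (simplified pseudo-trace)::

    parse_options()              # fills Options.commands with ['configure', 'build']
    run_command('init')
    run_command('configure')
    run_command('build')
    run_command('shutdown')
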
""" parse_options() run_command('init') while Options.commands: cmd_name = Options.commands.pop(0) ctx = run_command(cmd_name) Logs.info('%r finished successfully (%s)', cmd_name, ctx.log_timer) run_command('shutdown') ########################################################################################### def distclean_dir(dirname): """ Distclean function called in the particular case when:: top == out :param dirname: absolute path of the folder to clean :type dirname: string """ for (root, dirs, files) in os.walk(dirname): for f in files: if f.endswith(('.o', '.moc', '.exe')): fname = os.path.join(root, f) try: os.remove(fname) except OSError: Logs.warn('Could not remove %r', fname) for x in (Context.DBFILE, 'config.log'): try: os.remove(x) except OSError: pass try: shutil.rmtree(Build.CACHE_DIR) except OSError: pass def distclean(ctx): '''removes build folders and data''' def remove_and_log(k, fun): try: fun(k) except EnvironmentError as e: if e.errno != errno.ENOENT: Logs.warn('Could not remove %r', k) # remove waf cache folders on the top-level if not Options.commands: for k in os.listdir('.'): for x in '.waf-2 waf-2 .waf3-2 waf3-2'.split(): if k.startswith(x): remove_and_log(k, shutil.rmtree) # remove a build folder, if any cur = '.' if os.environ.get('NO_LOCK_IN_TOP') or ctx.options.no_lock_in_top: cur = ctx.options.out try: lst = os.listdir(cur) except OSError: Logs.warn('Could not read %r', cur) return if Options.lockfile in lst: f = os.path.join(cur, Options.lockfile) try: env = ConfigSet.ConfigSet(f) except EnvironmentError: Logs.warn('Could not read %r', f) return if not env.out_dir or not env.top_dir: Logs.warn('Invalid lock file %r', f) return if env.out_dir == env.top_dir: distclean_dir(env.out_dir) else: remove_and_log(env.out_dir, shutil.rmtree) env_dirs = [env.out_dir] if not (os.environ.get('NO_LOCK_IN_TOP') or ctx.options.no_lock_in_top): env_dirs.append(env.top_dir) if not (os.environ.get('NO_LOCK_IN_RUN') or ctx.options.no_lock_in_run): env_dirs.append(env.run_dir) for k in env_dirs: p = os.path.join(k, Options.lockfile) remove_and_log(p, os.remove) class Dist(Context.Context): '''creates an archive containing the project source code''' cmd = 'dist' fun = 'dist' algo = 'tar.bz2' ext_algo = {} def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ self.recurse([os.path.dirname(Context.g_module.root_path)]) self.archive() def archive(self): """ Creates the source archive. 
""" import tarfile arch_name = self.get_arch_name() try: self.base_path except AttributeError: self.base_path = self.path node = self.base_path.make_node(arch_name) try: node.delete() except OSError: pass files = self.get_files() if self.algo.startswith('tar.'): tar = tarfile.open(node.abspath(), 'w:' + self.algo.replace('tar.', '')) for x in files: self.add_tar_file(x, tar) tar.close() elif self.algo == 'zip': import zipfile zip = zipfile.ZipFile(node.abspath(), 'w', compression=zipfile.ZIP_DEFLATED) for x in files: archive_name = self.get_base_name() + '/' + x.path_from(self.base_path) zip.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED) zip.close() else: self.fatal('Valid algo types are tar.bz2, tar.gz, tar.xz or zip') try: from hashlib import sha256 except ImportError: digest = '' else: digest = ' (sha256=%r)' % sha256(node.read(flags='rb')).hexdigest() Logs.info('New archive created: %s%s', self.arch_name, digest) def get_tar_path(self, node): """ Return the path to use for a node in the tar archive, the purpose of this is to let subclases resolve symbolic links or to change file names :return: absolute path :rtype: string """ return node.abspath() def add_tar_file(self, x, tar): """ Adds a file to the tar archive. Symlinks are not verified. :param x: file path :param tar: tar file object """ p = self.get_tar_path(x) tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path)) tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' if os.path.isfile(p): with open(p, 'rb') as f: tar.addfile(tinfo, fileobj=f) else: tar.addfile(tinfo) def get_tar_prefix(self): """ Returns the base path for files added into the archive tar file :rtype: string """ try: return self.tar_prefix except AttributeError: return self.get_base_name() def get_arch_name(self): """ Returns the archive file name. Set the attribute *arch_name* to change the default value:: def dist(ctx): ctx.arch_name = 'ctx.tar.bz2' :rtype: string """ try: self.arch_name except AttributeError: self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo) return self.arch_name def get_base_name(self): """ Returns the default name of the main directory in the archive, which is set to *appname-version*. Set the attribute *base_name* to change the default value:: def dist(ctx): ctx.base_name = 'files' :rtype: string """ try: self.base_name except AttributeError: appname = getattr(Context.g_module, Context.APPNAME, 'noname') version = getattr(Context.g_module, Context.VERSION, '1.0') self.base_name = appname + '-' + version return self.base_name def get_excl(self): """ Returns the patterns to exclude for finding the files in the top-level directory. Set the attribute *excl* to change the default value:: def dist(ctx): ctx.excl = 'build **/*.o **/*.class' :rtype: string """ try: return self.excl except AttributeError: self.excl = Node.exclude_regs + ' **/waf-2.* **/.waf-2.* **/waf3-2.* **/.waf3-2.* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*' if Context.out_dir: nd = self.root.find_node(Context.out_dir) if nd: self.excl += ' ' + nd.path_from(self.base_path) return self.excl def get_files(self): """ Files to package are searched automatically by :py:func:`waflib.Node.Node.ant_glob`. 
Set *files* to prevent this behaviour:: def dist(ctx): ctx.files = ctx.path.find_node('wscript') Files are also searched from the directory 'base_path', to change it, set:: def dist(ctx): ctx.base_path = path :rtype: list of :py:class:`waflib.Node.Node` """ try: files = self.files except AttributeError: files = self.base_path.ant_glob('**/*', excl=self.get_excl()) return files def dist(ctx): '''makes a tarball for redistributing the sources''' pass class DistCheck(Dist): """creates an archive with dist, then tries to build it""" fun = 'distcheck' cmd = 'distcheck' def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ self.recurse([os.path.dirname(Context.g_module.root_path)]) self.archive() self.check() def make_distcheck_cmd(self, tmpdir): cfg = [] if Options.options.distcheck_args: cfg = shlex.split(Options.options.distcheck_args) else: cfg = [x for x in sys.argv if x.startswith('-')] cmd = [sys.executable, sys.argv[0], 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] + cfg return cmd def check(self): """ Creates the archive, uncompresses it and tries to build the project """ import tempfile, tarfile with tarfile.open(self.get_arch_name()) as t: for x in t: t.extract(x) instdir = tempfile.mkdtemp('.inst', self.get_base_name()) cmd = self.make_distcheck_cmd(instdir) ret = Utils.subprocess.Popen(cmd, cwd=self.get_base_name()).wait() if ret: raise Errors.WafError('distcheck failed with code %r' % ret) if os.path.exists(instdir): raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir) shutil.rmtree(self.get_base_name()) def distcheck(ctx): '''checks if the project compiles (tarball from 'dist')''' pass def autoconfigure(execute_method): """ Decorator that enables context commands to run *configure* as needed. """ def execute(self): """ Wraps :py:func:`waflib.Context.Context.execute` on the context class """ if not Configure.autoconfig: return execute_method(self) env = ConfigSet.ConfigSet() do_config = False try: env.load(os.path.join(Context.top_dir, Options.lockfile)) except EnvironmentError: Logs.warn('Configuring the project') do_config = True else: if env.run_dir != Context.run_dir: do_config = True else: h = 0 for f in env.files: try: h = Utils.h_list((h, Utils.readf(f, 'rb'))) except EnvironmentError: do_config = True break else: do_config = h != env.hash if do_config: cmd = env.config_cmd or 'configure' if Configure.autoconfig == 'clobber': tmp = Options.options.__dict__ launch_dir_tmp = Context.launch_dir if env.options: Options.options.__dict__ = env.options Context.launch_dir = env.launch_dir try: run_command(cmd) finally: Options.options.__dict__ = tmp Context.launch_dir = launch_dir_tmp else: run_command(cmd) run_command(self.cmd) else: return execute_method(self) return execute Build.BuildContext.execute = autoconfigure(Build.BuildContext.execute) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Task.py0000660000000000000000000011507600000000000020673 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Tasks represent atomic operations such as processes. 
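A minimal task class sketch (class name and command line are illustrative); the ``run_str`` scriptlet is compiled into a ``run`` method by the metaclass defined below::

    from waflib import Task

    class copy_file(Task.Task):
        run_str = 'cp ${SRC} ${TGT}'   # ${SRC}/${TGT} expand from inputs/outputs
        color   = 'BLUE'
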
""" import os, re, sys, tempfile, traceback from waflib import Utils, Logs, Errors # task states NOT_RUN = 0 """The task was not executed yet""" MISSING = 1 """The task has been executed but the files have not been created""" CRASHED = 2 """The task execution returned a non-zero exit status""" EXCEPTION = 3 """An exception occurred in the task execution""" CANCELED = 4 """A dependency for the task is missing so it was cancelled""" SKIPPED = 8 """The task did not have to be executed""" SUCCESS = 9 """The task was successfully executed""" ASK_LATER = -1 """The task is not ready to be executed""" SKIP_ME = -2 """The task does not need to be executed""" RUN_ME = -3 """The task must be executed""" CANCEL_ME = -4 """The task cannot be executed because of a dependency problem""" COMPILE_TEMPLATE_SHELL = ''' def f(tsk): env = tsk.env gen = tsk.generator bld = gen.bld cwdx = tsk.get_cwd() p = env.get_flat def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s return tsk.exec_command(cmd, cwd=cwdx, env=env.env or None) ''' COMPILE_TEMPLATE_NOSHELL = ''' def f(tsk): env = tsk.env gen = tsk.generator bld = gen.bld cwdx = tsk.get_cwd() def to_list(xx): if isinstance(xx, str): return [xx] return xx def merge(lst1, lst2): if lst1 and lst2: return lst1[:-1] + [lst1[-1] + lst2[0]] + lst2[1:] return lst1 + lst2 lst = [] %s if '' in lst: lst = [x for x in lst if x] tsk.last_cmd = lst return tsk.exec_command(lst, cwd=cwdx, env=env.env or None) ''' COMPILE_TEMPLATE_SIG_VARS = ''' def f(tsk): sig = tsk.generator.bld.hash_env_vars(tsk.env, tsk.vars) tsk.m.update(sig) env = tsk.env gen = tsk.generator bld = gen.bld cwdx = tsk.get_cwd() p = env.get_flat buf = [] %s tsk.m.update(repr(buf).encode()) ''' classes = {} """ The metaclass :py:class:`waflib.Task.store_task_type` stores all class tasks created by user scripts or Waf tools to this dict. It maps class names to class objects. """ class store_task_type(type): """ Metaclass: store the task classes into the dict pointed by the class attribute 'register' which defaults to :py:const:`waflib.Task.classes`, The attribute 'run_str' is compiled into a method 'run' bound to the task class. """ def __init__(cls, name, bases, dict): super(store_task_type, cls).__init__(name, bases, dict) name = cls.__name__ if name != 'evil' and name != 'Task': if getattr(cls, 'run_str', None): # if a string is provided, convert it to a method (f, dvars) = compile_fun(cls.run_str, cls.shell) cls.hcode = Utils.h_cmd(cls.run_str) cls.orig_run_str = cls.run_str # change the name of run_str or it is impossible to subclass with a function cls.run_str = None cls.run = f # process variables cls.vars = list(set(cls.vars + dvars)) cls.vars.sort() if cls.vars: fun = compile_sig_vars(cls.vars) if fun: cls.sig_vars = fun elif getattr(cls, 'run', None) and not 'hcode' in cls.__dict__: # getattr(cls, 'hcode') would look in the upper classes cls.hcode = Utils.h_cmd(cls.run) # be creative getattr(cls, 'register', classes)[name] = cls evil = store_task_type('evil', (object,), {}) "Base class provided to avoid writing a metaclass, so the code can run in python 2.6 and 3.x unmodified" class Task(evil): """ Task objects represents actions to perform such as commands to execute by calling the `run` method. Detecting when to execute a task occurs in the method :py:meth:`waflib.Task.Task.runnable_status`. Detecting which tasks to execute is performed through a hash value returned by :py:meth:`waflib.Task.Task.signature`. 
The task signature is persistent from build to build. """ vars = [] """ConfigSet variables that should trigger a rebuild (class attribute used for :py:meth:`waflib.Task.Task.sig_vars`)""" always_run = False """Specify whether task instances must always be executed or not (class attribute)""" shell = False """Execute the command with the shell (class attribute)""" color = 'GREEN' """Color for the console display, see :py:const:`waflib.Logs.colors_lst`""" ext_in = [] """File extensions that objects of this task class may use""" ext_out = [] """File extensions that objects of this task class may create""" before = [] """The instances of this class are executed before the instances of classes whose names are in this list""" after = [] """The instances of this class are executed after the instances of classes whose names are in this list""" hcode = Utils.SIG_NIL """String representing an additional hash for the class representation""" keep_last_cmd = False """Whether to keep the last command executed on the instance after execution. This may be useful for certain extensions but it can a lot of memory. """ weight = 0 """Optional weight to tune the priority for task instances. The higher, the earlier. The weight only applies to single task objects.""" tree_weight = 0 """Optional weight to tune the priority of task instances and whole subtrees. The higher, the earlier.""" prio_order = 0 """Priority order set by the scheduler on instances during the build phase. You most likely do not need to set it. """ __slots__ = ('hasrun', 'generator', 'env', 'inputs', 'outputs', 'dep_nodes', 'run_after') def __init__(self, *k, **kw): self.hasrun = NOT_RUN try: self.generator = kw['generator'] except KeyError: self.generator = self self.env = kw['env'] """:py:class:`waflib.ConfigSet.ConfigSet` object (make sure to provide one)""" self.inputs = [] """List of input nodes, which represent the files used by the task instance""" self.outputs = [] """List of output nodes, which represent the files created by the task instance""" self.dep_nodes = [] """List of additional nodes to depend on""" self.run_after = set() """Set of tasks that must be executed before this one""" def __lt__(self, other): return self.priority() > other.priority() def __le__(self, other): return self.priority() >= other.priority() def __gt__(self, other): return self.priority() < other.priority() def __ge__(self, other): return self.priority() <= other.priority() def get_cwd(self): """ :return: current working directory :rtype: :py:class:`waflib.Node.Node` """ bld = self.generator.bld ret = getattr(self, 'cwd', None) or getattr(bld, 'cwd', bld.bldnode) if isinstance(ret, str): if os.path.isabs(ret): ret = bld.root.make_node(ret) else: ret = self.generator.path.make_node(ret) return ret def quote_flag(self, x): """ Surround a process argument by quotes so that a list of arguments can be written to a file :param x: flag :type x: string :return: quoted flag :rtype: string """ old = x if '\\' in x: x = x.replace('\\', '\\\\') if '"' in x: x = x.replace('"', '\\"') if old != x or ' ' in x or '\t' in x or "'" in x: x = '"%s"' % x return x def priority(self): """ Priority of execution; the higher, the earlier :return: the priority value :rtype: a tuple of numeric values """ return (self.weight + self.prio_order, - getattr(self.generator, 'tg_idx_count', 0)) def split_argfile(self, cmd): """ Splits a list of process commands into the executable part and its list of arguments :return: a tuple containing the executable first and then the rest of arguments 
:rtype: tuple """ return ([cmd[0]], [self.quote_flag(x) for x in cmd[1:]]) def exec_command(self, cmd, **kw): """ Wrapper for :py:meth:`waflib.Context.Context.exec_command`. This version set the current working directory (``build.variant_dir``), applies PATH settings (if self.env.PATH is provided), and can run long commands through a temporary ``@argfile``. :param cmd: process command to execute :type cmd: list of string (best) or string (process will use a shell) :return: the return code :rtype: int Optional parameters: #. cwd: current working directory (Node or string) #. stdout: set to None to prevent waf from capturing the process standard output #. stderr: set to None to prevent waf from capturing the process standard error #. timeout: timeout value (Python 3) """ if not 'cwd' in kw: kw['cwd'] = self.get_cwd() if hasattr(self, 'timeout'): kw['timeout'] = self.timeout if self.env.PATH: env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH) if hasattr(self, 'stdout'): kw['stdout'] = self.stdout if hasattr(self, 'stderr'): kw['stderr'] = self.stderr if not isinstance(cmd, str): if Utils.is_win32: # win32 compares the resulting length http://support.microsoft.com/kb/830473 too_long = sum([len(arg) for arg in cmd]) + len(cmd) > 8192 else: # non-win32 counts the amount of arguments (200k) too_long = len(cmd) > 200000 if too_long and getattr(self, 'allow_argsfile', True): # Shunt arguments to a temporary file if the command is too long. cmd, args = self.split_argfile(cmd) try: (fd, tmp) = tempfile.mkstemp() os.write(fd, '\r\n'.join(args).encode()) os.close(fd) if Logs.verbose: Logs.debug('argfile: @%r -> %r', tmp, args) return self.generator.bld.exec_command(cmd + ['@' + tmp], **kw) finally: try: os.remove(tmp) except OSError: # anti-virus and indexers can keep files open -_- pass return self.generator.bld.exec_command(cmd, **kw) def process(self): """ Runs the task and handles errors :return: 0 or None if everything is fine :rtype: integer """ # remove the task signature immediately before it is executed # so that the task will be executed again in case of failure try: del self.generator.bld.task_sigs[self.uid()] except KeyError: pass try: ret = self.run() except Exception: self.err_msg = traceback.format_exc() self.hasrun = EXCEPTION else: if ret: self.err_code = ret self.hasrun = CRASHED else: try: self.post_run() except Errors.WafError: pass except Exception: self.err_msg = traceback.format_exc() self.hasrun = EXCEPTION else: self.hasrun = SUCCESS if self.hasrun != SUCCESS and self.scan: # rescan dependencies on next run try: del self.generator.bld.imp_sigs[self.uid()] except KeyError: pass def log_display(self, bld): "Writes the execution status on the context logger" if self.generator.bld.progress_bar == 3: return s = self.display() if s: if bld.logger: logger = bld.logger else: logger = Logs if self.generator.bld.progress_bar == 1: c1 = Logs.colors.cursor_off c2 = Logs.colors.cursor_on logger.info(s, extra={'stream': sys.stderr, 'terminator':'', 'c1': c1, 'c2' : c2}) else: logger.info(s, extra={'terminator':'', 'c1': '', 'c2' : ''}) def display(self): """ Returns an execution status for the console, the progress bar, or the IDE output. 
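With the default progress mode, the returned string resembles the following (values are illustrative, color escape sequences omitted)::

	[ 9/24] Compiling src/main.c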
:rtype: string """ col1 = Logs.colors(self.color) col2 = Logs.colors.NORMAL master = self.generator.bld.producer def cur(): # the current task position, computed as late as possible return master.processed - master.ready.qsize() if self.generator.bld.progress_bar == 1: return self.generator.bld.progress_line(cur(), master.total, col1, col2) if self.generator.bld.progress_bar == 2: ela = str(self.generator.bld.timer) try: ins = ','.join([n.name for n in self.inputs]) except AttributeError: ins = '' try: outs = ','.join([n.name for n in self.outputs]) except AttributeError: outs = '' return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (master.total, cur(), ins, outs, ela) s = str(self) if not s: return None total = master.total n = len(str(total)) fs = '[%%%dd/%%%dd] %%s%%s%%s%%s\n' % (n, n) kw = self.keyword() if kw: kw += ' ' return fs % (cur(), total, kw, col1, s, col2) def hash_constraints(self): """ Identifies a task type for all the constraints relevant for the scheduler: precedence, file production :return: a hash value :rtype: string """ return (tuple(self.before), tuple(self.after), tuple(self.ext_in), tuple(self.ext_out), self.__class__.__name__, self.hcode) def format_error(self): """ Returns an error message to display the build failure reasons :rtype: string """ if Logs.verbose: msg = ': %r\n%r' % (self, getattr(self, 'last_cmd', '')) else: msg = ' (run with -v to display more information)' name = getattr(self.generator, 'name', '') if getattr(self, "err_msg", None): return self.err_msg elif not self.hasrun: return 'task in %r was not executed for some reason: %r' % (name, self) elif self.hasrun == CRASHED: try: return ' -> task in %r failed with exit status %r%s' % (name, self.err_code, msg) except AttributeError: return ' -> task in %r failed%s' % (name, msg) elif self.hasrun == MISSING: return ' -> missing files in %r%s' % (name, msg) elif self.hasrun == CANCELED: return ' -> %r canceled because of missing dependencies' % name else: return 'invalid status for task in %r: %r' % (name, self.hasrun) def colon(self, var1, var2): """ Enable scriptlet expressions of the form ${FOO_ST:FOO} If the first variable (FOO_ST) is empty, then an empty list is returned The results will be slightly different if FOO_ST is a list, for example:: env.FOO = ['p1', 'p2'] env.FOO_ST = '-I%s' # ${FOO_ST:FOO} returns ['-Ip1', '-Ip2'] env.FOO_ST = ['-a', '-b'] # ${FOO_ST:FOO} returns ['-a', '-b', 'p1', '-a', '-b', 'p2'] """ tmp = self.env[var1] if not tmp: return [] if isinstance(var2, str): it = self.env[var2] else: it = var2 if isinstance(tmp, str): return [tmp % x for x in it] else: lst = [] for y in it: lst.extend(tmp) lst.append(y) return lst def __str__(self): "string to display to the user" name = self.__class__.__name__ if self.outputs: if name.endswith(('lib', 'program')) or not self.inputs: node = self.outputs[0] return node.path_from(node.ctx.launch_node()) if not (self.inputs or self.outputs): return self.__class__.__name__ if len(self.inputs) == 1: node = self.inputs[0] return node.path_from(node.ctx.launch_node()) src_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.inputs]) tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) if self.outputs: sep = ' -> ' else: sep = '' return '%s: %s%s%s' % (self.__class__.__name__, src_str, sep, tgt_str) def keyword(self): "Display keyword used to prettify the console outputs" name = self.__class__.__name__ if name.endswith(('lib', 'program')): return 'Linking' if len(self.inputs) == 1 and 
len(self.outputs) == 1: return 'Compiling' if not self.inputs: if self.outputs: return 'Creating' else: return 'Running' return 'Processing' def __repr__(self): "for debugging purposes" try: ins = ",".join([x.name for x in self.inputs]) outs = ",".join([x.name for x in self.outputs]) except AttributeError: ins = ",".join([str(x) for x in self.inputs]) outs = ",".join([str(x) for x in self.outputs]) return "".join(['\n\t{task %r: ' % id(self), self.__class__.__name__, " ", ins, " -> ", outs, '}']) def uid(self): """ Returns an identifier used to determine if tasks are up-to-date. Since the identifier will be stored between executions, it must be: - unique for a task: no two tasks return the same value (for a given build context) - the same for a given task instance By default, the node paths, the class name, and the function are used as inputs to compute a hash. The pointer to the object (python built-in 'id') will change between build executions, and must be avoided in such hashes. :return: hash value :rtype: string """ try: return self.uid_ except AttributeError: m = Utils.md5(self.__class__.__name__) up = m.update for x in self.inputs + self.outputs: up(x.abspath()) self.uid_ = m.digest() return self.uid_ def set_inputs(self, inp): """ Appends the nodes to the *inputs* list :param inp: input nodes :type inp: node or list of nodes """ if isinstance(inp, list): self.inputs += inp else: self.inputs.append(inp) def set_outputs(self, out): """ Appends the nodes to the *outputs* list :param out: output nodes :type out: node or list of nodes """ if isinstance(out, list): self.outputs += out else: self.outputs.append(out) def set_run_after(self, task): """ Run this task only after the given *task*. Calling this method from :py:meth:`waflib.Task.Task.runnable_status` may cause build deadlocks; see :py:meth:`waflib.Tools.fc.fc.runnable_status` for details. :param task: task :type task: :py:class:`waflib.Task.Task` """ assert isinstance(task, Task) self.run_after.add(task) def signature(self): """ Task signatures are stored between build executions, they are use to track the changes made to the input nodes (not to the outputs!). The signature hashes data from various sources: * explicit dependencies: files listed in the inputs (list of node objects) :py:meth:`waflib.Task.Task.sig_explicit_deps` * implicit dependencies: list of nodes returned by scanner methods (when present) :py:meth:`waflib.Task.Task.sig_implicit_deps` * hashed data: variables/values read from task.vars/task.env :py:meth:`waflib.Task.Task.sig_vars` If the signature is expected to give a different result, clear the cache kept in ``self.cache_sig``:: from waflib import Task class cls(Task.Task): def signature(self): sig = super(Task.Task, self).signature() delattr(self, 'cache_sig') return super(Task.Task, self).signature() :return: the signature value :rtype: string or bytes """ try: return self.cache_sig except AttributeError: pass self.m = Utils.md5(self.hcode) # explicit deps self.sig_explicit_deps() # env vars self.sig_vars() # implicit deps / scanner results if self.scan: try: self.sig_implicit_deps() except Errors.TaskRescan: return self.signature() ret = self.cache_sig = self.m.digest() return ret def runnable_status(self): """ Returns the Task status :return: a task state in :py:const:`waflib.Task.RUN_ME`, :py:const:`waflib.Task.SKIP_ME`, :py:const:`waflib.Task.CANCEL_ME` or :py:const:`waflib.Task.ASK_LATER`. 
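A hedged override sketch that forces re-execution whenever the default logic would skip the task (similar in effect to setting ``always_run``)::

	from waflib import Task
	class mytask(Task.Task):
		def runnable_status(self):
			ret = super(mytask, self).runnable_status()
			if ret == Task.SKIP_ME:
				# assumption: this class should always be re-executed
				ret = Task.RUN_ME
			return ret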
:rtype: int """ bld = self.generator.bld if bld.is_install < 0: return SKIP_ME for t in self.run_after: if not t.hasrun: return ASK_LATER elif t.hasrun < SKIPPED: # a dependency has an error return CANCEL_ME # first compute the signature try: new_sig = self.signature() except Errors.TaskNotReady: return ASK_LATER # compare the signature to a signature computed previously key = self.uid() try: prev_sig = bld.task_sigs[key] except KeyError: Logs.debug('task: task %r must run: it was never run before or the task code changed', self) return RUN_ME if new_sig != prev_sig: Logs.debug('task: task %r must run: the task signature changed', self) return RUN_ME # compare the signatures of the outputs for node in self.outputs: sig = bld.node_sigs.get(node) if not sig: Logs.debug('task: task %r must run: an output node has no signature', self) return RUN_ME if sig != key: Logs.debug('task: task %r must run: an output node was produced by another task', self) return RUN_ME if not node.exists(): Logs.debug('task: task %r must run: an output node does not exist', self) return RUN_ME return (self.always_run and RUN_ME) or SKIP_ME def post_run(self): """ Called after successful execution to record that the task has run by updating the entry in :py:attr:`waflib.Build.BuildContext.task_sigs`. """ bld = self.generator.bld for node in self.outputs: if not node.exists(): self.hasrun = MISSING self.err_msg = '-> missing file: %r' % node.abspath() raise Errors.WafError(self.err_msg) bld.node_sigs[node] = self.uid() # make sure this task produced the files in question bld.task_sigs[self.uid()] = self.signature() if not self.keep_last_cmd: try: del self.last_cmd except AttributeError: pass def sig_explicit_deps(self): """ Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.inputs` and :py:attr:`waflib.Task.Task.dep_nodes` signatures. """ bld = self.generator.bld upd = self.m.update # the inputs for x in self.inputs + self.dep_nodes: upd(x.get_bld_sig()) # manual dependencies, they can slow down the builds if bld.deps_man: additional_deps = bld.deps_man for x in self.inputs + self.outputs: try: d = additional_deps[x] except KeyError: continue for v in d: try: v = v.get_bld_sig() except AttributeError: if hasattr(v, '__call__'): v = v() # dependency is a function, call it upd(v) def sig_deep_inputs(self): """ Enable rebuilds on input files task signatures. Not used by default. Example: hashes of output programs can be unchanged after being re-linked, despite the libraries being different. This method can thus prevent stale unit test results (waf_unit_test.py). Hashing input file timestamps is another possibility for the implementation. This may cause unnecessary rebuilds when input tasks are frequently executed. Here is an implementation example:: lst = [] for node in self.inputs + self.dep_nodes: st = os.stat(node.abspath()) lst.append(st.st_mtime) lst.append(st.st_size) self.m.update(Utils.h_list(lst)) The downside of the implementation is that it absolutely requires all build directory files to be declared within the current build. 
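Rather than overriding this method directly, the :py:func:`waflib.Task.deep_inputs` class decorator defined further below chains it after :py:meth:`waflib.Task.Task.sig_explicit_deps`; a minimal sketch (the class name and command are illustrative)::

	from waflib import Task

	@Task.deep_inputs
	class run_tests(Task.Task):
		# assumption: the input node is an executable test program
		run_str = '${SRC[0].abspath()}'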
""" bld = self.generator.bld lst = [bld.task_sigs[bld.node_sigs[node]] for node in (self.inputs + self.dep_nodes) if node.is_bld()] self.m.update(Utils.h_list(lst)) def sig_vars(self): """ Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.env` variables/values When overriding this method, and if scriptlet expressions are used, make sure to follow the code in :py:meth:`waflib.Task.Task.compile_sig_vars` to enable dependencies on scriptlet results. This method may be replaced on subclasses by the metaclass to force dependencies on scriptlet code. """ sig = self.generator.bld.hash_env_vars(self.env, self.vars) self.m.update(sig) scan = None """ This method, when provided, returns a tuple containing: * a list of nodes corresponding to real files * a list of names for files not found in path_lst For example:: from waflib.Task import Task class mytask(Task): def scan(self, node): return ([], []) The first and second lists in the tuple are stored in :py:attr:`waflib.Build.BuildContext.node_deps` and :py:attr:`waflib.Build.BuildContext.raw_deps` respectively. """ def sig_implicit_deps(self): """ Used by :py:meth:`waflib.Task.Task.signature`; it hashes node signatures obtained by scanning for dependencies (:py:meth:`waflib.Task.Task.scan`). The exception :py:class:`waflib.Errors.TaskRescan` is thrown when a file has changed. In this case, the method :py:meth:`waflib.Task.Task.signature` is called once again, and return here to call :py:meth:`waflib.Task.Task.scan` and searching for dependencies. """ bld = self.generator.bld # get the task signatures from previous runs key = self.uid() prev = bld.imp_sigs.get(key, []) # for issue #379 if prev: try: if prev == self.compute_sig_implicit_deps(): return prev except Errors.TaskNotReady: raise except EnvironmentError: # when a file was renamed, remove the stale nodes (headers in folders without source files) # this will break the order calculation for headers created during the build in the source directory (should be uncommon) # the behaviour will differ when top != out for x in bld.node_deps.get(self.uid(), []): if not x.is_bld() and not x.exists(): try: del x.parent.children[x.name] except KeyError: pass del bld.imp_sigs[key] raise Errors.TaskRescan('rescan') # no previous run or the signature of the dependencies has changed, rescan the dependencies (bld.node_deps[key], bld.raw_deps[key]) = self.scan() if Logs.verbose: Logs.debug('deps: scanner for %s: %r; unresolved: %r', self, bld.node_deps[key], bld.raw_deps[key]) # recompute the signature and return it try: bld.imp_sigs[key] = self.compute_sig_implicit_deps() except EnvironmentError: for k in bld.node_deps.get(self.uid(), []): if not k.exists(): Logs.warn('Dependency %r for %r is missing: check the task declaration and the build order!', k, self) raise def compute_sig_implicit_deps(self): """ Used by :py:meth:`waflib.Task.Task.sig_implicit_deps` for computing the actual hash of the :py:class:`waflib.Node.Node` returned by the scanner. 
:return: a hash value for the implicit dependencies :rtype: string or bytes """ upd = self.m.update self.are_implicit_nodes_ready() # scanner returns a node that does not have a signature # just *ignore* the error and let them figure out from the compiler output # waf -k behaviour for k in self.generator.bld.node_deps.get(self.uid(), []): upd(k.get_bld_sig()) return self.m.digest() def are_implicit_nodes_ready(self): """ For each node returned by the scanner, see if there is a task that creates it, and infer the build order This has a low performance impact on null builds (1.86s->1.66s) thanks to caching (28s->1.86s) """ bld = self.generator.bld try: cache = bld.dct_implicit_nodes except AttributeError: bld.dct_implicit_nodes = cache = {} # one cache per build group try: dct = cache[bld.current_group] except KeyError: dct = cache[bld.current_group] = {} for tsk in bld.cur_tasks: for x in tsk.outputs: dct[x] = tsk modified = False for x in bld.node_deps.get(self.uid(), []): if x in dct: self.run_after.add(dct[x]) modified = True if modified: for tsk in self.run_after: if not tsk.hasrun: #print "task is not ready..." raise Errors.TaskNotReady('not ready') if sys.hexversion > 0x3000000: def uid(self): try: return self.uid_ except AttributeError: m = Utils.md5(self.__class__.__name__.encode('latin-1', 'xmlcharrefreplace')) up = m.update for x in self.inputs + self.outputs: up(x.abspath().encode('latin-1', 'xmlcharrefreplace')) self.uid_ = m.digest() return self.uid_ uid.__doc__ = Task.uid.__doc__ Task.uid = uid def is_before(t1, t2): """ Returns a non-zero value if task t1 is to be executed before task t2:: t1.ext_out = '.h' t2.ext_in = '.h' t2.after = ['t1'] t1.before = ['t2'] waflib.Task.is_before(t1, t2) # True :param t1: Task object :type t1: :py:class:`waflib.Task.Task` :param t2: Task object :type t2: :py:class:`waflib.Task.Task` """ to_list = Utils.to_list for k in to_list(t2.ext_in): if k in to_list(t1.ext_out): return 1 if t1.__class__.__name__ in to_list(t2.after): return 1 if t2.__class__.__name__ in to_list(t1.before): return 1 return 0 def set_file_constraints(tasks): """ Updates the ``run_after`` attribute of all tasks based on the task inputs and outputs :param tasks: tasks :type tasks: list of :py:class:`waflib.Task.Task` """ ins = Utils.defaultdict(set) outs = Utils.defaultdict(set) for x in tasks: for a in x.inputs: ins[a].add(x) for a in x.dep_nodes: ins[a].add(x) for a in x.outputs: outs[a].add(x) links = set(ins.keys()).intersection(outs.keys()) for k in links: for a in ins[k]: a.run_after.update(outs[k]) class TaskGroup(object): """ Wrap nxm task order constraints into a single object to prevent the creation of large list/set objects This is an optimization """ def __init__(self, prev, next): self.prev = prev self.next = next self.done = False def get_hasrun(self): for k in self.prev: if not k.hasrun: return NOT_RUN return SUCCESS hasrun = property(get_hasrun, None) def set_precedence_constraints(tasks): """ Updates the ``run_after`` attribute of all tasks based on the after/before/ext_out/ext_in attributes :param tasks: tasks :type tasks: list of :py:class:`waflib.Task.Task` """ cstr_groups = Utils.defaultdict(list) for x in tasks: h = x.hash_constraints() cstr_groups[h].append(x) keys = list(cstr_groups.keys()) maxi = len(keys) # this list should be short for i in range(maxi): t1 = cstr_groups[keys[i]][0] for j in range(i + 1, maxi): t2 = cstr_groups[keys[j]][0] # add the constraints based on the comparisons if is_before(t1, t2): a = i b = j elif is_before(t2, t1): a = 
j b = i else: continue a = cstr_groups[keys[a]] b = cstr_groups[keys[b]] if len(a) < 2 or len(b) < 2: for x in b: x.run_after.update(a) else: group = TaskGroup(set(a), set(b)) for x in b: x.run_after.add(group) def funex(c): """ Compiles a scriptlet expression into a Python function :param c: function to compile :type c: string :return: the function 'f' declared in the input string :rtype: function """ dc = {} exec(c, dc) return dc['f'] re_cond = re.compile(r'(?P\w+)|(?P\|)|(?P&)') re_novar = re.compile(r'^(SRC|TGT)\W+.*?$') reg_act = re.compile(r'(?P\\)|(?P\$\$)|(?P\$\{(?P\w+)(?P.*?)\})', re.M) def compile_fun_shell(line): """ Creates a compiled function to execute a process through a sub-shell """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return '\\\\' elif g('subst'): extr.append((g('var'), g('code'))) return "%s" return None line = reg_act.sub(repl, line) or line dvars = [] def add_dvar(x): if x not in dvars: dvars.append(x) def replc(m): # performs substitutions and populates dvars if m.group('and'): return ' and ' elif m.group('or'): return ' or ' else: x = m.group('var') add_dvar(x) return 'env[%r]' % x parm = [] app = parm.append for (var, meth) in extr: if var == 'SRC': if meth: app('tsk.inputs%s' % meth) else: app('" ".join([a.path_from(cwdx) for a in tsk.inputs])') elif var == 'TGT': if meth: app('tsk.outputs%s' % meth) else: app('" ".join([a.path_from(cwdx) for a in tsk.outputs])') elif meth: if meth.startswith(':'): add_dvar(var) m = meth[1:] if m == 'SRC': m = '[a.path_from(cwdx) for a in tsk.inputs]' elif m == 'TGT': m = '[a.path_from(cwdx) for a in tsk.outputs]' elif re_novar.match(m): m = '[tsk.inputs%s]' % m[3:] elif re_novar.match(m): m = '[tsk.outputs%s]' % m[3:] else: add_dvar(m) if m[:3] not in ('tsk', 'gen', 'bld'): m = '%r' % m app('" ".join(tsk.colon(%r, %s))' % (var, m)) elif meth.startswith('?'): # In A?B|C output env.A if one of env.B or env.C is non-empty expr = re_cond.sub(replc, meth[1:]) app('p(%r) if (%s) else ""' % (var, expr)) else: call = '%s%s' % (var, meth) add_dvar(call) app(call) else: add_dvar(var) app("p('%s')" % var) if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm)) else: parm = '' c = COMPILE_TEMPLATE_SHELL % (line, parm) Logs.debug('action: %s', c.strip().splitlines()) return (funex(c), dvars) reg_act_noshell = re.compile(r"(?P\s+)|(?P\$\{(?P\w+)(?P.*?)\})|(?P([^$ \t\n\r\f\v]|\$\$)+)", re.M) def compile_fun_noshell(line): """ Creates a compiled function to execute a process without a sub-shell """ buf = [] dvars = [] merge = False app = buf.append def add_dvar(x): if x not in dvars: dvars.append(x) def replc(m): # performs substitutions and populates dvars if m.group('and'): return ' and ' elif m.group('or'): return ' or ' else: x = m.group('var') add_dvar(x) return 'env[%r]' % x for m in reg_act_noshell.finditer(line): if m.group('space'): merge = False continue elif m.group('text'): app('[%r]' % m.group('text').replace('$$', '$')) elif m.group('subst'): var = m.group('var') code = m.group('code') if var == 'SRC': if code: app('[tsk.inputs%s]' % code) else: app('[a.path_from(cwdx) for a in tsk.inputs]') elif var == 'TGT': if code: app('[tsk.outputs%s]' % code) else: app('[a.path_from(cwdx) for a in tsk.outputs]') elif code: if code.startswith(':'): # a composed variable ${FOO:OUT} add_dvar(var) m = code[1:] if m == 'SRC': m = '[a.path_from(cwdx) for a in tsk.inputs]' elif m == 'TGT': m = '[a.path_from(cwdx) for a in tsk.outputs]' elif re_novar.match(m): m = '[tsk.inputs%s]' % m[3:] elif 
re_novar.match(m): m = '[tsk.outputs%s]' % m[3:] else: add_dvar(m) if m[:3] not in ('tsk', 'gen', 'bld'): m = '%r' % m app('tsk.colon(%r, %s)' % (var, m)) elif code.startswith('?'): # In A?B|C output env.A if one of env.B or env.C is non-empty expr = re_cond.sub(replc, code[1:]) app('to_list(env[%r] if (%s) else [])' % (var, expr)) else: # plain code such as ${tsk.inputs[0].abspath()} call = '%s%s' % (var, code) add_dvar(call) app('to_list(%s)' % call) else: # a plain variable such as # a plain variable like ${AR} app('to_list(env[%r])' % var) add_dvar(var) if merge: tmp = 'merge(%s, %s)' % (buf[-2], buf[-1]) del buf[-1] buf[-1] = tmp merge = True # next turn buf = ['lst.extend(%s)' % x for x in buf] fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf) Logs.debug('action: %s', fun.strip().splitlines()) return (funex(fun), dvars) def compile_fun(line, shell=False): """ Parses a string expression such as '${CC} ${SRC} -o ${TGT}' and returns a pair containing: * The function created (compiled) for use as :py:meth:`waflib.Task.Task.run` * The list of variables that must cause rebuilds when *env* data is modified for example:: from waflib.Task import compile_fun compile_fun('cxx', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}') def build(bld): bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"') The env variables (CXX, ..) on the task must not hold dicts so as to preserve a consistent order. The reserved keywords ``TGT`` and ``SRC`` represent the task input and output nodes """ if isinstance(line, str): if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0: shell = True else: dvars_lst = [] funs_lst = [] for x in line: if isinstance(x, str): fun, dvars = compile_fun(x, shell) dvars_lst += dvars funs_lst.append(fun) else: # assume a function to let through funs_lst.append(x) def composed_fun(task): for x in funs_lst: ret = x(task) if ret: return ret return None return composed_fun, dvars_lst if shell: return compile_fun_shell(line) else: return compile_fun_noshell(line) def compile_sig_vars(vars): """ This method produces a sig_vars method suitable for subclasses that provide scriptlet code in their run_str code. If no such method can be created, this method returns None. The purpose of the sig_vars method returned is to ensures that rebuilds occur whenever the contents of the expression changes. This is the case B below:: import time # case A: regular variables tg = bld(rule='echo ${FOO}') tg.env.FOO = '%s' % time.time() # case B bld(rule='echo ${gen.foo}', foo='%s' % time.time()) :param vars: env variables such as CXXFLAGS or gen.foo :type vars: list of string :return: A sig_vars method relevant for dependencies if adequate, else None :rtype: A function, or None in most cases """ buf = [] for x in sorted(vars): if x[:3] in ('tsk', 'gen', 'bld'): buf.append('buf.append(%s)' % x) if buf: return funex(COMPILE_TEMPLATE_SIG_VARS % '\n\t'.join(buf)) return None def task_factory(name, func=None, vars=None, color='GREEN', ext_in=[], ext_out=[], before=[], after=[], shell=False, scan=None): """ Returns a new task subclass with the function ``run`` compiled from the line given. 
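A hedged usage sketch (the class name and the ``COPY`` env variable are illustrative)::

	from waflib import Task
	cls = Task.task_factory('copy_file', '${COPY} ${SRC} ${TGT}', color='BLUE')
	# the new class is also registered as waflib.Task.classes['copy_file']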
:param func: method run :type func: string or function :param vars: list of variables to hash :type vars: list of string :param color: color to use :type color: string :param shell: when *func* is a string, enable/disable the use of the shell :type shell: bool :param scan: method scan :type scan: function :rtype: :py:class:`waflib.Task.Task` """ params = { 'vars': vars or [], # function arguments are static, and this one may be modified by the class 'color': color, 'name': name, 'shell': shell, 'scan': scan, } if isinstance(func, str) or isinstance(func, tuple): params['run_str'] = func else: params['run'] = func cls = type(Task)(name, (Task,), params) classes[name] = cls if ext_in: cls.ext_in = Utils.to_list(ext_in) if ext_out: cls.ext_out = Utils.to_list(ext_out) if before: cls.before = Utils.to_list(before) if after: cls.after = Utils.to_list(after) return cls def deep_inputs(cls): """ Task class decorator to enable rebuilds on input files task signatures """ def sig_explicit_deps(self): Task.sig_explicit_deps(self) Task.sig_deep_inputs(self) cls.sig_explicit_deps = sig_explicit_deps return cls TaskBase = Task "Provided for compatibility reasons, TaskBase should not be used" class TaskSemaphore(object): """ Task semaphores provide a simple and efficient way of throttling the amount of a particular task to run concurrently. The throttling value is capped by the amount of maximum jobs, so for example, a `TaskSemaphore(10)` has no effect in a `-j2` build. Task semaphores are typically specified on the task class level:: class compile(waflib.Task.Task): semaphore = waflib.Task.TaskSemaphore(2) run_str = 'touch ${TGT}' Task semaphores are meant to be used by the build scheduler in the main thread, so there are no guarantees of thread safety. """ def __init__(self, num): """ :param num: maximum value of concurrent tasks :type num: int """ self.num = num self.locking = set() self.waiting = set() def is_locked(self): """Returns True if this semaphore cannot be acquired by more tasks""" return len(self.locking) >= self.num def acquire(self, tsk): """ Mark the semaphore as used by the given task (not re-entrant). :param tsk: task object :type tsk: :py:class:`waflib.Task.Task` :raises: :py:class:`IndexError` in case the resource is already acquired """ if self.is_locked(): raise IndexError('Cannot lock more %r' % self.locking) self.locking.add(tsk) def release(self, tsk): """ Mark the semaphore as unused by the given task. :param tsk: task object :type tsk: :py:class:`waflib.Task.Task` :raises: :py:class:`KeyError` in case the resource is not acquired by the task """ self.locking.remove(tsk) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/TaskGen.py0000660000000000000000000006410000000000000021314 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Task generators The class :py:class:`waflib.TaskGen.task_gen` encapsulates the creation of task objects (low-level code) The instances can have various parameters, but the creation of task nodes (Task.py) is deferred. 
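For instance (names are illustrative), no task objects exist until the generator is posted::

	def build(bld):
		tg = bld(features='c cprogram', source='main.c', target='app')
		# tg.tasks is still empty here; tasks appear once the generator is posted
		tg.post()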
To achieve this, various methods are called from the method "apply" """ import copy, re, os, functools from waflib import Task, Utils, Logs, Errors, ConfigSet, Node feats = Utils.defaultdict(set) """remember the methods declaring features""" HEADER_EXTS = ['.h', '.hpp', '.hxx', '.hh'] class task_gen(object): """ Instances of this class create :py:class:`waflib.Task.Task` when calling the method :py:meth:`waflib.TaskGen.task_gen.post` from the main thread. A few notes: * The methods to call (*self.meths*) can be specified dynamically (removing, adding, ..) * The 'features' are used to add methods to self.meths and then execute them * The attribute 'path' is a node representing the location of the task generator * The tasks created are added to the attribute *tasks* * The attribute 'idx' is a counter of task generators in the same path """ mappings = Utils.ordered_iter_dict() """Mappings are global file extension mappings that are retrieved in the order of definition""" prec = Utils.defaultdict(set) """Dict that holds the precedence execution rules for task generator methods""" def __init__(self, *k, **kw): """ Task generator objects predefine various attributes (source, target) for possible processing by process_rule (make-like rules) or process_source (extensions, misc methods) Tasks are stored on the attribute 'tasks'. They are created by calling methods listed in ``self.meths`` or referenced in the attribute ``features`` A topological sort is performed to execute the methods in correct order. The extra key/value elements passed in ``kw`` are set as attributes """ self.source = [] self.target = '' self.meths = [] """ List of method names to execute (internal) """ self.features = [] """ List of feature names for bringing new methods in """ self.tasks = [] """ Tasks created are added to this list """ if not 'bld' in kw: # task generators without a build context :-/ self.env = ConfigSet.ConfigSet() self.idx = 0 self.path = None else: self.bld = kw['bld'] self.env = self.bld.env.derive() self.path = kw.get('path', self.bld.path) # by default, emulate chdir when reading scripts # Provide a unique index per folder # This is part of a measure to prevent output file name collisions path = self.path.abspath() try: self.idx = self.bld.idx[path] = self.bld.idx.get(path, 0) + 1 except AttributeError: self.bld.idx = {} self.idx = self.bld.idx[path] = 1 # Record the global task generator count try: self.tg_idx_count = self.bld.tg_idx_count = self.bld.tg_idx_count + 1 except AttributeError: self.tg_idx_count = self.bld.tg_idx_count = 1 for key, val in kw.items(): setattr(self, key, val) def __str__(self): """Debugging helper""" return "" % (self.name, self.path.abspath()) def __repr__(self): """Debugging helper""" lst = [] for x in self.__dict__: if x not in ('env', 'bld', 'compiled_tasks', 'tasks'): lst.append("%s=%s" % (x, repr(getattr(self, x)))) return "bld(%s) in %s" % (", ".join(lst), self.path.abspath()) def get_cwd(self): """ Current working directory for the task generator, defaults to the build directory. This is still used in a few places but it should disappear at some point as the classes define their own working directory. 
:rtype: :py:class:`waflib.Node.Node` """ return self.bld.bldnode def get_name(self): """ If the attribute ``name`` is not set on the instance, the name is computed from the target name:: def build(bld): x = bld(name='foo') x.get_name() # foo y = bld(target='bar') y.get_name() # bar :rtype: string :return: name of this task generator """ try: return self._name except AttributeError: if isinstance(self.target, list): lst = [str(x) for x in self.target] name = self._name = ','.join(lst) else: name = self._name = str(self.target) return name def set_name(self, name): self._name = name name = property(get_name, set_name) def to_list(self, val): """ Ensures that a parameter is a list, see :py:func:`waflib.Utils.to_list` :type val: string or list of string :param val: input to return as a list :rtype: list """ if isinstance(val, str): return val.split() else: return val def post(self): """ Creates tasks for this task generators. The following operations are performed: #. The body of this method is called only once and sets the attribute ``posted`` #. The attribute ``features`` is used to add more methods in ``self.meths`` #. The methods are sorted by the precedence table ``self.prec`` or `:waflib:attr:waflib.TaskGen.task_gen.prec` #. The methods are then executed in order #. The tasks created are added to :py:attr:`waflib.TaskGen.task_gen.tasks` """ if getattr(self, 'posted', None): return False self.posted = True keys = set(self.meths) keys.update(feats['*']) # add the methods listed in the features self.features = Utils.to_list(self.features) for x in self.features: st = feats[x] if st: keys.update(st) elif not x in Task.classes: Logs.warn('feature %r does not exist - bind at least one method to it?', x) # copy the precedence table prec = {} prec_tbl = self.prec for x in prec_tbl: if x in keys: prec[x] = prec_tbl[x] # elements disconnected tmp = [] for a in keys: for x in prec.values(): if a in x: break else: tmp.append(a) tmp.sort(reverse=True) # topological sort out = [] while tmp: e = tmp.pop() if e in keys: out.append(e) try: nlst = prec[e] except KeyError: pass else: del prec[e] for x in nlst: for y in prec: if x in prec[y]: break else: tmp.append(x) tmp.sort(reverse=True) if prec: buf = ['Cycle detected in the method execution:'] for k, v in prec.items(): buf.append('- %s after %s' % (k, [x for x in v if x in prec])) raise Errors.WafError('\n'.join(buf)) self.meths = out # then we run the methods in order Logs.debug('task_gen: posting %s %d', self, id(self)) for x in out: try: v = getattr(self, x) except AttributeError: raise Errors.WafError('%r is not a valid task generator method' % x) Logs.debug('task_gen: -> %s (%d)', x, id(self)) v() Logs.debug('task_gen: posted %s', self.name) return True def get_hook(self, node): """ Returns the ``@extension`` method to call for a Node of a particular extension. :param node: Input file to process :type node: :py:class:`waflib.Tools.Node.Node` :return: A method able to process the input node by looking at the extension :rtype: function """ name = node.name for k in self.mappings: try: if name.endswith(k): return self.mappings[k] except TypeError: # regexps objects if k.match(name): return self.mappings[k] keys = list(self.mappings.keys()) raise Errors.WafError("File %r has no mapping in %r (load a waf tool?)" % (node, keys)) def create_task(self, name, src=None, tgt=None, **kw): """ Creates task instances. 
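A hedged example, as typically written from an ``@extension`` method (the extension, the output suffix and the ``mytask`` class are illustrative)::

	from waflib.TaskGen import extension

	@extension('.in')
	def process_in(self, node):
		# 'mytask' is assumed to be a registered task class
		return self.create_task('mytask', node, node.change_ext('.out'))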
:param name: task class name :type name: string :param src: input nodes :type src: list of :py:class:`waflib.Tools.Node.Node` :param tgt: output nodes :type tgt: list of :py:class:`waflib.Tools.Node.Node` :return: A task object :rtype: :py:class:`waflib.Task.Task` """ task = Task.classes[name](env=self.env.derive(), generator=self) if src: task.set_inputs(src) if tgt: task.set_outputs(tgt) task.__dict__.update(kw) self.tasks.append(task) return task def clone(self, env): """ Makes a copy of a task generator. Once the copy is made, it is necessary to ensure that the it does not create the same output files as the original, or the same files may be compiled several times. :param env: A configuration set :type env: :py:class:`waflib.ConfigSet.ConfigSet` :return: A copy :rtype: :py:class:`waflib.TaskGen.task_gen` """ newobj = self.bld() for x in self.__dict__: if x in ('env', 'bld'): continue elif x in ('path', 'features'): setattr(newobj, x, getattr(self, x)) else: setattr(newobj, x, copy.copy(getattr(self, x))) newobj.posted = False if isinstance(env, str): newobj.env = self.bld.all_envs[env].derive() else: newobj.env = env.derive() return newobj def declare_chain(name='', rule=None, reentrant=None, color='BLUE', ext_in=[], ext_out=[], before=[], after=[], decider=None, scan=None, install_path=None, shell=False): """ Creates a new mapping and a task class for processing files by extension. See Tools/flex.py for an example. :param name: name for the task class :type name: string :param rule: function to execute or string to be compiled in a function :type rule: string or function :param reentrant: re-inject the output file in the process (done automatically, set to 0 to disable) :type reentrant: int :param color: color for the task output :type color: string :param ext_in: execute the task only after the files of such extensions are created :type ext_in: list of string :param ext_out: execute the task only before files of such extensions are processed :type ext_out: list of string :param before: execute instances of this task before classes of the given names :type before: list of string :param after: execute instances of this task after classes of the given names :type after: list of string :param decider: if present, function that returns a list of output file extensions (overrides ext_out for output files, but not for the build order) :type decider: function :param scan: scanner function for the task :type scan: function :param install_path: installation path for the output nodes :type install_path: string """ ext_in = Utils.to_list(ext_in) ext_out = Utils.to_list(ext_out) if not name: name = rule cls = Task.task_factory(name, rule, color=color, ext_in=ext_in, ext_out=ext_out, before=before, after=after, scan=scan, shell=shell) def x_file(self, node): if ext_in: _ext_in = ext_in[0] tsk = self.create_task(name, node) cnt = 0 ext = decider(self, node) if decider else cls.ext_out for x in ext: k = node.change_ext(x, ext_in=_ext_in) tsk.outputs.append(k) if reentrant != None: if cnt < int(reentrant): self.source.append(k) else: # reinject downstream files into the build for y in self.mappings: # ~ nfile * nextensions :-/ if k.name.endswith(y): self.source.append(k) break cnt += 1 if install_path: self.install_task = self.add_install_files(install_to=install_path, install_from=tsk.outputs) return tsk for x in cls.ext_in: task_gen.mappings[x] = x_file return x_file def taskgen_method(func): """ Decorator that registers method as a task generator method. 
The function must accept a task generator as first parameter:: from waflib.TaskGen import taskgen_method @taskgen_method def mymethod(self): pass :param func: task generator method to add :type func: function :rtype: function """ setattr(task_gen, func.__name__, func) return func def feature(*k): """ Decorator that registers a task generator method that will be executed when the object attribute ``feature`` contains the corresponding key(s):: from waflib.Task import feature @feature('myfeature') def myfunction(self): print('that is my feature!') def build(bld): bld(features='myfeature') :param k: feature names :type k: list of string """ def deco(func): setattr(task_gen, func.__name__, func) for name in k: feats[name].update([func.__name__]) return func return deco def before_method(*k): """ Decorator that registera task generator method which will be executed before the functions of given name(s):: from waflib.TaskGen import feature, before @feature('myfeature') @before_method('fun2') def fun1(self): print('feature 1!') @feature('myfeature') def fun2(self): print('feature 2!') def build(bld): bld(features='myfeature') :param k: method names :type k: list of string """ def deco(func): setattr(task_gen, func.__name__, func) for fun_name in k: task_gen.prec[func.__name__].add(fun_name) return func return deco before = before_method def after_method(*k): """ Decorator that registers a task generator method which will be executed after the functions of given name(s):: from waflib.TaskGen import feature, after @feature('myfeature') @after_method('fun2') def fun1(self): print('feature 1!') @feature('myfeature') def fun2(self): print('feature 2!') def build(bld): bld(features='myfeature') :param k: method names :type k: list of string """ def deco(func): setattr(task_gen, func.__name__, func) for fun_name in k: task_gen.prec[fun_name].add(func.__name__) return func return deco after = after_method def extension(*k): """ Decorator that registers a task generator method which will be invoked during the processing of source files for the extension given:: from waflib import Task class mytask(Task): run_str = 'cp ${SRC} ${TGT}' @extension('.moo') def create_maa_file(self, node): self.create_task('mytask', node, node.change_ext('.maa')) def build(bld): bld(source='foo.moo') """ def deco(func): setattr(task_gen, func.__name__, func) for x in k: task_gen.mappings[x] = func return func return deco @taskgen_method def to_nodes(self, lst, path=None): """ Flatten the input list of string/nodes/lists into a list of nodes. It is used by :py:func:`waflib.TaskGen.process_source` and :py:func:`waflib.TaskGen.process_rule`. It is designed for source files, for folders, see :py:func:`waflib.Tools.ccroot.to_incnodes`: :param lst: input list :type lst: list of string and nodes :param path: path from which to search the nodes (by default, :py:attr:`waflib.TaskGen.task_gen.path`) :type path: :py:class:`waflib.Tools.Node.Node` :rtype: list of :py:class:`waflib.Tools.Node.Node` """ tmp = [] path = path or self.path find = path.find_resource if isinstance(lst, Node.Node): lst = [lst] for x in Utils.to_list(lst): if isinstance(x, str): node = find(x) elif hasattr(x, 'name'): node = x else: tmp.extend(self.to_nodes(x)) continue if not node: raise Errors.WafError('source not found: %r in %r' % (x, self)) tmp.append(node) return tmp @feature('*') def process_source(self): """ Processes each element in the attribute ``source`` by extension. #. 
The *source* list is converted through :py:meth:`waflib.TaskGen.to_nodes` to a list of :py:class:`waflib.Node.Node` first. #. File extensions are mapped to methods having the signature: ``def meth(self, node)`` by :py:meth:`waflib.TaskGen.extension` #. The method is retrieved through :py:meth:`waflib.TaskGen.task_gen.get_hook` #. When called, the methods may modify self.source to append more source to process #. The mappings can map an extension or a filename (see the code below) """ self.source = self.to_nodes(getattr(self, 'source', [])) for node in self.source: self.get_hook(node)(self, node) @feature('*') @before_method('process_source') def process_rule(self): """ Processes the attribute ``rule``. When present, :py:meth:`waflib.TaskGen.process_source` is disabled:: def build(bld): bld(rule='cp ${SRC} ${TGT}', source='wscript', target='bar.txt') Main attributes processed: * rule: command to execute, it can be a tuple of strings for multiple commands * chmod: permissions for the resulting files (integer value such as Utils.O755) * shell: set to False to execute the command directly (default is True to use a shell) * scan: scanner function * vars: list of variables to trigger rebuilds, such as CFLAGS * cls_str: string to display when executing the task * cls_keyword: label to display when executing the task * cache_rule: by default, try to re-use similar classes, set to False to disable * source: list of Node or string objects representing the source files required by this task * target: list of Node or string objects representing the files that this task creates * cwd: current working directory (Node or string) * stdout: standard output, set to None to prevent waf from capturing the text * stderr: standard error, set to None to prevent waf from capturing the text * timeout: timeout for command execution (Python 3) * always: whether to always run the command (False by default) * deep_inputs: whether the task must depend on the input file tasks too (False by default) """ if not getattr(self, 'rule', None): return # create the task class name = str(getattr(self, 'name', None) or self.target or getattr(self.rule, '__name__', self.rule)) # or we can put the class in a cache for performance reasons try: cache = self.bld.cache_rule_attr except AttributeError: cache = self.bld.cache_rule_attr = {} chmod = getattr(self, 'chmod', None) shell = getattr(self, 'shell', True) color = getattr(self, 'color', 'BLUE') scan = getattr(self, 'scan', None) _vars = getattr(self, 'vars', []) cls_str = getattr(self, 'cls_str', None) cls_keyword = getattr(self, 'cls_keyword', None) use_cache = getattr(self, 'cache_rule', 'True') deep_inputs = getattr(self, 'deep_inputs', False) scan_val = has_deps = hasattr(self, 'deps') if scan: scan_val = id(scan) key = Utils.h_list((name, self.rule, chmod, shell, color, cls_str, cls_keyword, scan_val, _vars, deep_inputs)) cls = None if use_cache: try: cls = cache[key] except KeyError: pass if not cls: rule = self.rule if chmod is not None: def chmod_fun(tsk): for x in tsk.outputs: os.chmod(x.abspath(), tsk.generator.chmod) if isinstance(rule, tuple): rule = list(rule) rule.append(chmod_fun) rule = tuple(rule) else: rule = (rule, chmod_fun) cls = Task.task_factory(name, rule, _vars, shell=shell, color=color) if cls_str: setattr(cls, '__str__', self.cls_str) if cls_keyword: setattr(cls, 'keyword', self.cls_keyword) if deep_inputs: Task.deep_inputs(cls) if scan: cls.scan = self.scan elif has_deps: def scan(self): nodes = [] for x in self.generator.to_list(getattr(self.generator, 
'deps', None)): node = self.generator.path.find_resource(x) if not node: self.generator.bld.fatal('Could not find %r (was it declared?)' % x) nodes.append(node) return [nodes, []] cls.scan = scan if use_cache: cache[key] = cls # now create one instance tsk = self.create_task(name) for x in ('after', 'before', 'ext_in', 'ext_out'): setattr(tsk, x, getattr(self, x, [])) if hasattr(self, 'stdout'): tsk.stdout = self.stdout if hasattr(self, 'stderr'): tsk.stderr = self.stderr if getattr(self, 'timeout', None): tsk.timeout = self.timeout if getattr(self, 'always', None): tsk.always_run = True if getattr(self, 'target', None): if isinstance(self.target, str): self.target = self.target.split() if not isinstance(self.target, list): self.target = [self.target] for x in self.target: if isinstance(x, str): tsk.outputs.append(self.path.find_or_declare(x)) else: x.parent.mkdir() # if a node was given, create the required folders tsk.outputs.append(x) if getattr(self, 'install_path', None): self.install_task = self.add_install_files(install_to=self.install_path, install_from=tsk.outputs, chmod=getattr(self, 'chmod', Utils.O644)) if getattr(self, 'source', None): tsk.inputs = self.to_nodes(self.source) # bypass the execution of process_source by setting the source to an empty list self.source = [] if getattr(self, 'cwd', None): tsk.cwd = self.cwd if isinstance(tsk.run, functools.partial): # Python documentation says: "partial objects defined in classes # behave like static methods and do not transform into bound # methods during instance attribute look-up." tsk.run = functools.partial(tsk.run, tsk) @feature('seq') def sequence_order(self): """ Adds a strict sequential constraint between the tasks generated by task generators. It works because task generators are posted in order. It will not post objects which belong to other folders. Example:: bld(features='javac seq') bld(features='jar seq') To start a new sequence, set the attribute seq_start, for example:: obj = bld(features='seq') obj.seq_start = True Note that the method is executed in last position. This is more an example than a widely-used solution. """ if self.meths and self.meths[-1] != 'sequence_order': self.meths.append('sequence_order') return if getattr(self, 'seq_start', None): return # all the tasks previously declared must be run before these if getattr(self.bld, 'prev', None): self.bld.prev.post() for x in self.bld.prev.tasks: for y in self.tasks: y.set_run_after(x) self.bld.prev = self re_m4 = re.compile(r'@(\w+)@', re.M) class subst_pc(Task.Task): """ Creates *.pc* files from *.pc.in*. The task is executed whenever an input variable used in the substitution changes. 
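A hedged sketch (the template contents are illustrative): placeholders of the form ``@VAR@`` are replaced by matching task generator attributes or ``env`` variables::

	# contents of foo.pc.in:
	#   prefix=@PREFIX@
	#   Version: @VERSION@

	def build(bld):
		bld(source='foo.pc.in', VERSION='1.0', install_path='${LIBDIR}/pkgconfig/')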
""" def force_permissions(self): "Private for the time being, we will probably refactor this into run_str=[run1,chmod]" if getattr(self.generator, 'chmod', None): for x in self.outputs: os.chmod(x.abspath(), self.generator.chmod) def run(self): "Substitutes variables in a .in file" if getattr(self.generator, 'is_copy', None): for i, x in enumerate(self.outputs): x.write(self.inputs[i].read('rb'), 'wb') stat = os.stat(self.inputs[i].abspath()) # Preserve mtime of the copy os.utime(self.outputs[i].abspath(), (stat.st_atime, stat.st_mtime)) self.force_permissions() return None if getattr(self.generator, 'fun', None): ret = self.generator.fun(self) if not ret: self.force_permissions() return ret code = self.inputs[0].read(encoding=getattr(self.generator, 'encoding', 'latin-1')) if getattr(self.generator, 'subst_fun', None): code = self.generator.subst_fun(self, code) if code is not None: self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1')) self.force_permissions() return None # replace all % by %% to prevent errors by % signs code = code.replace('%', '%%') # extract the vars foo into lst and replace @foo@ by %(foo)s lst = [] def repl(match): g = match.group if g(1): lst.append(g(1)) return "%%(%s)s" % g(1) return '' code = getattr(self.generator, 're_m4', re_m4).sub(repl, code) try: d = self.generator.dct except AttributeError: d = {} for x in lst: tmp = getattr(self.generator, x, '') or self.env[x] or self.env[x.upper()] try: tmp = ''.join(tmp) except TypeError: tmp = str(tmp) d[x] = tmp code = code % d self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1')) self.generator.bld.raw_deps[self.uid()] = lst # make sure the signature is updated try: delattr(self, 'cache_sig') except AttributeError: pass self.force_permissions() def sig_vars(self): """ Compute a hash (signature) of the variables used in the substitution """ bld = self.generator.bld env = self.env upd = self.m.update if getattr(self.generator, 'fun', None): upd(Utils.h_fun(self.generator.fun).encode()) if getattr(self.generator, 'subst_fun', None): upd(Utils.h_fun(self.generator.subst_fun).encode()) # raw_deps: persistent custom values returned by the scanner vars = self.generator.bld.raw_deps.get(self.uid(), []) # hash both env vars and task generator attributes act_sig = bld.hash_env_vars(env, vars) upd(act_sig) lst = [getattr(self.generator, x, '') for x in vars] upd(Utils.h_list(lst)) return self.m.digest() @extension('.pc.in') def add_pcfile(self, node): """ Processes *.pc.in* files to *.pc*. Installs the results to ``${PREFIX}/lib/pkgconfig/`` by default def build(bld): bld(source='foo.pc.in', install_path='${LIBDIR}/pkgconfig/') """ tsk = self.create_task('subst_pc', node, node.change_ext('.pc', '.pc.in')) self.install_task = self.add_install_files( install_to=getattr(self, 'install_path', '${LIBDIR}/pkgconfig/'), install_from=tsk.outputs) class subst(subst_pc): pass @feature('subst') @before_method('process_source', 'process_rule') def process_subst(self): """ Defines a transformation that substitutes the contents of *source* files to *target* files:: def build(bld): bld( features='subst', source='foo.c.in', target='foo.c', install_path='${LIBDIR}/pkgconfig', VAR = 'val' ) The input files are supposed to contain macros of the form *@VAR@*, where *VAR* is an argument of the task generator object. This method overrides the processing by :py:meth:`waflib.TaskGen.process_source`. 
""" src = Utils.to_list(getattr(self, 'source', [])) if isinstance(src, Node.Node): src = [src] tgt = Utils.to_list(getattr(self, 'target', [])) if isinstance(tgt, Node.Node): tgt = [tgt] if len(src) != len(tgt): raise Errors.WafError('invalid number of source/target for %r' % self) for x, y in zip(src, tgt): if not x or not y: raise Errors.WafError('null source or target for %r' % self) a, b = None, None if isinstance(x, str) and isinstance(y, str) and x == y: a = self.path.find_node(x) b = self.path.get_bld().make_node(y) if not os.path.isfile(b.abspath()): b.parent.mkdir() else: if isinstance(x, str): a = self.path.find_resource(x) elif isinstance(x, Node.Node): a = x if isinstance(y, str): b = self.path.find_or_declare(y) elif isinstance(y, Node.Node): b = y if not a: raise Errors.WafError('could not find %r for %r' % (x, self)) tsk = self.create_task('subst', a, b) for k in ('after', 'before', 'ext_in', 'ext_out'): val = getattr(self, k, None) if val: setattr(tsk, k, val) # paranoid safety measure for the general case foo.in->foo.h with ambiguous dependencies for xt in HEADER_EXTS: if b.name.endswith(xt): tsk.ext_out = tsk.ext_out + ['.h'] break inst_to = getattr(self, 'install_path', None) if inst_to: self.install_task = self.add_install_files(install_to=inst_to, install_from=b, chmod=getattr(self, 'chmod', Utils.O644)) self.source = [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Tools/__init__.py0000660000000000000000000000010700000000000022614 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Tools/ar.py0000660000000000000000000000117200000000000021462 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) """ The **ar** program creates static libraries. This tool is almost always loaded from others (C, C++, D, etc) for static library support. 
""" from waflib.Configure import conf @conf def find_ar(conf): """Configuration helper used by C/C++ tools to enable the support for static libraries""" conf.load('ar') def configure(conf): """Finds the ar program and sets the default flags in ``conf.env.ARFLAGS``""" conf.find_program('ar', var='AR') conf.add_os_flags('ARFLAGS') if not conf.env.ARFLAGS: conf.env.ARFLAGS = ['rcs'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/Tools/asm.py0000660000000000000000000000520500000000000021641 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) """ Assembly support, used by tools such as gas and nasm To declare targets using assembly:: def configure(conf): conf.load('gcc gas') def build(bld): bld( features='c cstlib asm', source = 'test.S', target = 'asmtest') bld( features='asm asmprogram', source = 'test.S', target = 'asmtest') Support for pure asm programs and libraries should also work:: def configure(conf): conf.load('nasm') conf.find_program('ld', 'ASLINK') def build(bld): bld( features='asm asmprogram', source = 'test.S', target = 'asmtest') """ import re from waflib import Errors, Logs, Task from waflib.Tools.ccroot import link_task, stlink_task from waflib.TaskGen import extension from waflib.Tools import c_preproc re_lines = re.compile( '^[ \t]*(?:%)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) class asm_parser(c_preproc.c_parser): def filter_comments(self, node): code = node.read() code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) return re_lines.findall(code) class asm(Task.Task): """ Compiles asm files by gas/nasm/yasm/... 
""" color = 'BLUE' run_str = '${AS} ${ASFLAGS} ${ASMPATH_ST:INCPATHS} ${ASMDEFINES_ST:DEFINES} ${AS_SRC_F}${SRC} ${AS_TGT_F}${TGT}' def scan(self): if self.env.ASM_NAME == 'gas': return c_preproc.scan(self) elif self.env.ASM_NAME == 'nasm': Logs.warn('The Nasm dependency scanner is incomplete!') try: incn = self.generator.includes_nodes except AttributeError: raise Errors.WafError('%r is missing the "asm" feature' % self.generator) if c_preproc.go_absolute: nodepaths = incn else: nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] tmp = asm_parser(nodepaths) tmp.start(self.inputs[0], self.env) return (tmp.nodes, tmp.names) @extension('.s', '.S', '.asm', '.ASM', '.spp', '.SPP') def asm_hook(self, node): """ Binds the asm extension to the asm task :param node: input file :type node: :py:class:`waflib.Node.Node` """ return self.create_compiled_task('asm', node) class asmprogram(link_task): "Links object files into a c program" run_str = '${ASLINK} ${ASLINKFLAGS} ${ASLNK_TGT_F}${TGT} ${ASLNK_SRC_F}${SRC}' ext_out = ['.bin'] inst_to = '${BINDIR}' class asmshlib(asmprogram): "Links object files into a c shared library" inst_to = '${LIBDIR}' class asmstlib(stlink_task): "Links object files into a c static library" pass # do not remove def configure(conf): conf.env.ASMPATH_ST = '-I%s' conf.env.ASMDEFINES_ST = '-D%s' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Tools/bison.py0000660000000000000000000000224300000000000022172 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy 2009-2018 (ita) """ The **bison** program is a code generator which creates C or C++ files. The generated files are compiled into object files. """ from waflib import Task from waflib.TaskGen import extension class bison(Task.Task): """Compiles bison files""" color = 'BLUE' run_str = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}' ext_out = ['.h'] # just to make sure @extension('.y', '.yc', '.yy') def big_bison(self, node): """ Creates a bison task, which must be executed from the directory of the output file. 
""" has_h = '-d' in self.env.BISONFLAGS outs = [] if node.name.endswith('.yc'): outs.append(node.change_ext('.tab.cc')) if has_h: outs.append(node.change_ext('.tab.hh')) else: outs.append(node.change_ext('.tab.c')) if has_h: outs.append(node.change_ext('.tab.h')) tsk = self.create_task('bison', node, outs) tsk.cwd = node.parent.get_bld() # and the c/cxx file must be compiled too self.source.append(outs[0]) def configure(conf): """ Detects the *bison* program """ conf.find_program('bison', var='BISON') conf.env.BISONFLAGS = ['-d'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Tools/c.py0000660000000000000000000000277100000000000021310 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) "Base for c programs/libraries" from waflib import TaskGen, Task from waflib.Tools import c_preproc from waflib.Tools.ccroot import link_task, stlink_task @TaskGen.extension('.c') def c_hook(self, node): "Binds the c file extensions create :py:class:`waflib.Tools.c.c` instances" if not self.env.CC and self.env.CXX: return self.create_compiled_task('cxx', node) return self.create_compiled_task('c', node) class c(Task.Task): "Compiles C files into object files" run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' vars = ['CCDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = c_preproc.scan class cprogram(link_task): "Links object files into c programs" run_str = '${LINK_CC} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' ext_out = ['.bin'] vars = ['LINKDEPS'] inst_to = '${BINDIR}' class cshlib(cprogram): "Links object files into c shared libraries" inst_to = '${LIBDIR}' class cstlib(stlink_task): "Links object files into a c static libraries" pass # do not remove ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/c_aliases.py0000660000000000000000000000677400000000000023020 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2015 (ita) "base for all c/c++ programs and libraries" from waflib import Utils, Errors from waflib.Configure import conf def get_extensions(lst): """ Returns the file extensions for the list of files given as input :param lst: files to process :list lst: list of string or :py:class:`waflib.Node.Node` :return: list of file extensions :rtype: list of string """ ret = [] for x in Utils.to_list(lst): if not isinstance(x, str): x = x.name ret.append(x[x.rfind('.') + 1:]) return ret def sniff_features(**kw): """ Computes and returns the features required for a task generator by looking at the file extensions. 
This aimed for C/C++ mainly:: snif_features(source=['foo.c', 'foo.cxx'], type='shlib') # returns ['cxx', 'c', 'cxxshlib', 'cshlib'] :param source: source files to process :type source: list of string or :py:class:`waflib.Node.Node` :param type: object type in *program*, *shlib* or *stlib* :type type: string :return: the list of features for a task generator processing the source files :rtype: list of string """ exts = get_extensions(kw.get('source', [])) typ = kw['typ'] feats = [] # watch the order, cxx will have the precedence for x in 'cxx cpp c++ cc C'.split(): if x in exts: feats.append('cxx') break if 'c' in exts or 'vala' in exts or 'gs' in exts: feats.append('c') if 's' in exts or 'S' in exts: feats.append('asm') for x in 'f f90 F F90 for FOR'.split(): if x in exts: feats.append('fc') break if 'd' in exts: feats.append('d') if 'java' in exts: feats.append('java') return 'java' if typ in ('program', 'shlib', 'stlib'): will_link = False for x in feats: if x in ('cxx', 'd', 'fc', 'c', 'asm'): feats.append(x + typ) will_link = True if not will_link and not kw.get('features', []): raise Errors.WafError('Unable to determine how to link %r, try adding eg: features="c cshlib"?' % kw) return feats def set_features(kw, typ): """ Inserts data in the input dict *kw* based on existing data and on the type of target required (typ). :param kw: task generator parameters :type kw: dict :param typ: type of target :type typ: string """ kw['typ'] = typ kw['features'] = Utils.to_list(kw.get('features', [])) + Utils.to_list(sniff_features(**kw)) @conf def program(bld, *k, **kw): """ Alias for creating programs by looking at the file extensions:: def build(bld): bld.program(source='foo.c', target='app') # equivalent to: # bld(features='c cprogram', source='foo.c', target='app') """ set_features(kw, 'program') return bld(*k, **kw) @conf def shlib(bld, *k, **kw): """ Alias for creating shared libraries by looking at the file extensions:: def build(bld): bld.shlib(source='foo.c', target='app') # equivalent to: # bld(features='c cshlib', source='foo.c', target='app') """ set_features(kw, 'shlib') return bld(*k, **kw) @conf def stlib(bld, *k, **kw): """ Alias for creating static libraries by looking at the file extensions:: def build(bld): bld.stlib(source='foo.cpp', target='app') # equivalent to: # bld(features='cxx cxxstlib', source='foo.cpp', target='app') """ set_features(kw, 'stlib') return bld(*k, **kw) @conf def objects(bld, *k, **kw): """ Alias for creating object files by looking at the file extensions:: def build(bld): bld.objects(source='foo.c', target='app') # equivalent to: # bld(features='c', source='foo.c', target='app') """ set_features(kw, 'objects') return bld(*k, **kw) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/Tools/c_config.py0000660000000000000000000012176100000000000022636 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ C/C++/D configuration helpers """ from __future__ import with_statement import os, re, shlex from waflib import Build, Utils, Task, Options, Logs, Errors, Runner from waflib.TaskGen import after_method, feature from waflib.Configure import conf WAF_CONFIG_H = 'config.h' """default name for the config.h file""" DEFKEYS = 'define_key' INCKEYS = 'include_key' SNIP_EMPTY_PROGRAM = ''' int main(int argc, char **argv) { (void)argc; (void)argv; return 0; } ''' MACRO_TO_DESTOS = { '__linux__' : 'linux', '__GNU__' : 
'gnu', # hurd '__FreeBSD__' : 'freebsd', '__NetBSD__' : 'netbsd', '__OpenBSD__' : 'openbsd', '__sun' : 'sunos', '__hpux' : 'hpux', '__sgi' : 'irix', '_AIX' : 'aix', '__CYGWIN__' : 'cygwin', '__MSYS__' : 'cygwin', '_UWIN' : 'uwin', '_WIN64' : 'win32', '_WIN32' : 'win32', # Note about darwin: this is also tested with 'defined __APPLE__ && defined __MACH__' somewhere below in this file. '__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__' : 'darwin', '__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__' : 'darwin', # iphone '__QNX__' : 'qnx', '__native_client__' : 'nacl' # google native client platform } MACRO_TO_DEST_CPU = { '__x86_64__' : 'x86_64', '__amd64__' : 'x86_64', '__i386__' : 'x86', '__ia64__' : 'ia', '__mips__' : 'mips', '__sparc__' : 'sparc', '__alpha__' : 'alpha', '__aarch64__' : 'aarch64', '__thumb__' : 'thumb', '__arm__' : 'arm', '__hppa__' : 'hppa', '__powerpc__' : 'powerpc', '__ppc__' : 'powerpc', '__convex__' : 'convex', '__m68k__' : 'm68k', '__s390x__' : 's390x', '__s390__' : 's390', '__sh__' : 'sh', '__xtensa__' : 'xtensa', '__e2k__' : 'e2k', } @conf def parse_flags(self, line, uselib_store, env=None, force_static=False, posix=None): """ Parses flags from the input lines, and adds them to the relevant use variables:: def configure(conf): conf.parse_flags('-O3', 'FOO') # conf.env.CXXFLAGS_FOO = ['-O3'] # conf.env.CFLAGS_FOO = ['-O3'] :param line: flags :type line: string :param uselib_store: where to add the flags :type uselib_store: string :param env: config set or conf.env by default :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param force_static: force usage of static libraries :type force_static: bool default False :param posix: usage of POSIX mode for shlex lexical analiysis library :type posix: bool default True """ assert(isinstance(line, str)) env = env or self.env # Issue 811 and 1371 if posix is None: posix = True if '\\' in line: posix = ('\\ ' in line) or ('\\\\' in line) lex = shlex.shlex(line, posix=posix) lex.whitespace_split = True lex.commenters = '' lst = list(lex) so_re = re.compile(r"\.so(?:\.[0-9]+)*$") # append_unique is not always possible # for example, apple flags may require both -arch i386 and -arch ppc uselib = uselib_store def app(var, val): env.append_value('%s_%s' % (var, uselib), val) def appu(var, val): env.append_unique('%s_%s' % (var, uselib), val) static = False while lst: x = lst.pop(0) st = x[:2] ot = x[2:] if st == '-I' or st == '/I': if not ot: ot = lst.pop(0) appu('INCLUDES', ot) elif st == '-i': tmp = [x, lst.pop(0)] app('CFLAGS', tmp) app('CXXFLAGS', tmp) elif st == '-D' or (env.CXX_NAME == 'msvc' and st == '/D'): # not perfect but.. 
if not ot: ot = lst.pop(0) app('DEFINES', ot) elif st == '-l': if not ot: ot = lst.pop(0) prefix = 'STLIB' if (force_static or static) else 'LIB' app(prefix, ot) elif st == '-L': if not ot: ot = lst.pop(0) prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' appu(prefix, ot) elif x.startswith('/LIBPATH:'): prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' appu(prefix, x.replace('/LIBPATH:', '')) elif x.startswith('-std='): prefix = 'CXXFLAGS' if '++' in x else 'CFLAGS' app(prefix, x) elif x.startswith('+') or x in ('-pthread', '-fPIC', '-fpic', '-fPIE', '-fpie', '-flto', '-fno-lto'): app('CFLAGS', x) app('CXXFLAGS', x) app('LINKFLAGS', x) elif x == '-framework': appu('FRAMEWORK', lst.pop(0)) elif x.startswith('-F'): appu('FRAMEWORKPATH', x[2:]) elif x == '-Wl,-rpath' or x == '-Wl,-R': app('RPATH', lst.pop(0).lstrip('-Wl,')) elif x.startswith('-Wl,-R,'): app('RPATH', x[7:]) elif x.startswith('-Wl,-R'): app('RPATH', x[6:]) elif x.startswith('-Wl,-rpath,'): app('RPATH', x[11:]) elif x == '-Wl,-Bstatic' or x == '-Bstatic': static = True elif x == '-Wl,-Bdynamic' or x == '-Bdynamic': static = False elif x.startswith('-Wl') or x in ('-rdynamic', '-pie'): app('LINKFLAGS', x) elif x.startswith(('-m', '-f', '-dynamic', '-O', '-g')): # Adding the -W option breaks python builds on Openindiana app('CFLAGS', x) app('CXXFLAGS', x) elif x.startswith('-bundle'): app('LINKFLAGS', x) elif x.startswith(('-undefined', '-Xlinker')): arg = lst.pop(0) app('LINKFLAGS', [x, arg]) elif x.startswith(('-arch', '-isysroot')): tmp = [x, lst.pop(0)] app('CFLAGS', tmp) app('CXXFLAGS', tmp) app('LINKFLAGS', tmp) elif x.endswith(('.a', '.dylib', '.lib')) or so_re.search(x): appu('LINKFLAGS', x) # not cool, #762 else: self.to_log('Unhandled flag %r' % x) @conf def validate_cfg(self, kw): """ Searches for the program *pkg-config* if missing, and validates the parameters to pass to :py:func:`waflib.Tools.c_config.exec_cfg`. 
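
Exactly one of ``atleast_pkgconfig_version``, ``modversion`` or ``package`` must be
given; illustrative calls (the package names are only examples)::

	def configure(conf):
		conf.check_cfg(atleast_pkgconfig_version='0.25')
		conf.check_cfg(modversion='glib-2.0')
		conf.check_cfg(package='glib-2.0', args='--cflags --libs')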
:param path: the **-config program to use** (default is *pkg-config*) :type path: list of string :param msg: message to display to describe the test executed :type msg: string :param okmsg: message to display when the test is successful :type okmsg: string :param errmsg: message to display in case of error :type errmsg: string """ if not 'path' in kw: if not self.env.PKGCONFIG: self.find_program('pkg-config', var='PKGCONFIG') kw['path'] = self.env.PKGCONFIG # verify that exactly one action is requested s = ('atleast_pkgconfig_version' in kw) + ('modversion' in kw) + ('package' in kw) if s != 1: raise ValueError('exactly one of atleast_pkgconfig_version, modversion and package must be set') if not 'msg' in kw: if 'atleast_pkgconfig_version' in kw: kw['msg'] = 'Checking for pkg-config version >= %r' % kw['atleast_pkgconfig_version'] elif 'modversion' in kw: kw['msg'] = 'Checking for %r version' % kw['modversion'] else: kw['msg'] = 'Checking for %r' %(kw['package']) # let the modversion check set the okmsg to the detected version if not 'okmsg' in kw and not 'modversion' in kw: kw['okmsg'] = 'yes' if not 'errmsg' in kw: kw['errmsg'] = 'not found' # pkg-config version if 'atleast_pkgconfig_version' in kw: pass elif 'modversion' in kw: if not 'uselib_store' in kw: kw['uselib_store'] = kw['modversion'] if not 'define_name' in kw: kw['define_name'] = '%s_VERSION' % Utils.quote_define_name(kw['uselib_store']) else: if not 'uselib_store' in kw: kw['uselib_store'] = Utils.to_list(kw['package'])[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(kw['uselib_store']) @conf def exec_cfg(self, kw): """ Executes ``pkg-config`` or other ``-config`` applications to collect configuration flags: * if atleast_pkgconfig_version is given, check that pkg-config has the version n and return * if modversion is given, then return the module version * else, execute the *-config* program with the *args* and *variables* given, and set the flags on the *conf.env.FLAGS_name* variable :param path: the **-config program to use** :type path: list of string :param atleast_pkgconfig_version: minimum pkg-config version to use (disable other tests) :type atleast_pkgconfig_version: string :param package: package name, for example *gtk+-2.0* :type package: string :param uselib_store: if the test is successful, define HAVE\\_*name*. It is also used to define *conf.env.FLAGS_name* variables. 
:type uselib_store: string :param modversion: if provided, return the version of the given module and define *name*\\_VERSION :type modversion: string :param args: arguments to give to *package* when retrieving flags :type args: list of string :param variables: return the values of particular variables :type variables: list of string :param define_variable: additional variables to define (also in conf.env.PKG_CONFIG_DEFINES) :type define_variable: dict(string: string) :param pkg_config_path: paths where pkg-config should search for .pc config files (overrides env.PKG_CONFIG_PATH if exists) :type pkg_config_path: string, list of directories separated by colon :param force_static: force usage of static libraries :type force_static: bool default False :param posix: usage of POSIX mode for shlex lexical analiysis library :type posix: bool default True """ path = Utils.to_list(kw['path']) env = self.env.env or None if kw.get('pkg_config_path'): if not env: env = dict(self.environ) env['PKG_CONFIG_PATH'] = kw['pkg_config_path'] def define_it(): define_name = kw['define_name'] # by default, add HAVE_X to the config.h, else provide DEFINES_X for use=X if kw.get('global_define', 1): self.define(define_name, 1, False) else: self.env.append_unique('DEFINES_%s' % kw['uselib_store'], "%s=1" % define_name) if kw.get('add_have_to_env', 1): self.env[define_name] = 1 # pkg-config version if 'atleast_pkgconfig_version' in kw: cmd = path + ['--atleast-pkgconfig-version=%s' % kw['atleast_pkgconfig_version']] self.cmd_and_log(cmd, env=env) return # single version for a module if 'modversion' in kw: version = self.cmd_and_log(path + ['--modversion', kw['modversion']], env=env).strip() if not 'okmsg' in kw: kw['okmsg'] = version self.define(kw['define_name'], version) return version lst = [] + path defi = kw.get('define_variable') if not defi: defi = self.env.PKG_CONFIG_DEFINES or {} for key, val in defi.items(): lst.append('--define-variable=%s=%s' % (key, val)) static = kw.get('force_static', False) if 'args' in kw: args = Utils.to_list(kw['args']) if '--static' in args or '--static-libs' in args: static = True lst += args # tools like pkgconf expect the package argument after the -- ones -_- lst.extend(Utils.to_list(kw['package'])) # retrieving variables of a module if 'variables' in kw: v_env = kw.get('env', self.env) vars = Utils.to_list(kw['variables']) for v in vars: val = self.cmd_and_log(lst + ['--variable=' + v], env=env).strip() var = '%s_%s' % (kw['uselib_store'], v) v_env[var] = val return # so we assume the command-line will output flags to be parsed afterwards ret = self.cmd_and_log(lst, env=env) define_it() self.parse_flags(ret, kw['uselib_store'], kw.get('env', self.env), force_static=static, posix=kw.get('posix')) return ret @conf def check_cfg(self, *k, **kw): """ Checks for configuration flags using a **-config**-like program (pkg-config, sdl-config, etc). 
This wraps internal calls to :py:func:`waflib.Tools.c_config.validate_cfg` and :py:func:`waflib.Tools.c_config.exec_cfg` so check exec_cfg parameters descriptions for more details on kw passed A few examples:: def configure(conf): conf.load('compiler_c') conf.check_cfg(package='glib-2.0', args='--libs --cflags') conf.check_cfg(package='pango') conf.check_cfg(package='pango', uselib_store='MYPANGO', args=['--cflags', '--libs']) conf.check_cfg(package='pango', args=['pango >= 0.1.0', 'pango < 9.9.9', '--cflags', '--libs'], msg="Checking for 'pango 0.1.0'") conf.check_cfg(path='sdl-config', args='--cflags --libs', package='', uselib_store='SDL') conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI', mandatory=False) # variables conf.check_cfg(package='gtk+-2.0', variables=['includedir', 'prefix'], uselib_store='FOO') print(conf.env.FOO_includedir) """ self.validate_cfg(kw) if 'msg' in kw: self.start_msg(kw['msg'], **kw) ret = None try: ret = self.exec_cfg(kw) except self.errors.WafError as e: if 'errmsg' in kw: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: self.to_log('Command failure: %s' % e) self.fatal('The configuration failed') else: if not ret: ret = True kw['success'] = ret if 'okmsg' in kw: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret def build_fun(bld): """ Build function that is used for running configuration tests with ``conf.check()`` """ if bld.kw['compile_filename']: node = bld.srcnode.make_node(bld.kw['compile_filename']) node.write(bld.kw['code']) o = bld(features=bld.kw['features'], source=bld.kw['compile_filename'], target='testprog') for k, v in bld.kw.items(): setattr(o, k, v) if not bld.kw.get('quiet'): bld.conf.to_log("==>\n%s\n<==" % bld.kw['code']) @conf def validate_c(self, kw): """ Pre-checks the parameters that will be given to :py:func:`waflib.Configure.run_build` :param compiler: c or cxx (tries to guess what is best) :type compiler: string :param type: cprogram, cshlib, cstlib - not required if *features are given directly* :type type: binary to create :param feature: desired features for the task generator that will execute the test, for example ``cxx cxxstlib`` :type feature: list of string :param fragment: provide a piece of code for the test (default is to let the system create one) :type fragment: string :param uselib_store: define variables after the test is executed (IMPORTANT!) 
:type uselib_store: string :param use: parameters to use for building (just like the normal *use* keyword) :type use: list of string :param define_name: define to set when the check is over :type define_name: string :param execute: execute the resulting binary :type execute: bool :param define_ret: if execute is set to True, use the execution output in both the define and the return value :type define_ret: bool :param header_name: check for a particular header :type header_name: string :param auto_add_header_name: if header_name was set, add the headers in env.INCKEYS so the next tests will include these headers :type auto_add_header_name: bool """ for x in ('type_name', 'field_name', 'function_name'): if x in kw: Logs.warn('Invalid argument %r in test' % x) if not 'build_fun' in kw: kw['build_fun'] = build_fun if not 'env' in kw: kw['env'] = self.env.derive() env = kw['env'] if not 'compiler' in kw and not 'features' in kw: kw['compiler'] = 'c' if env.CXX_NAME and Task.classes.get('cxx'): kw['compiler'] = 'cxx' if not self.env.CXX: self.fatal('a c++ compiler is required') else: if not self.env.CC: self.fatal('a c compiler is required') if not 'compile_mode' in kw: kw['compile_mode'] = 'c' if 'cxx' in Utils.to_list(kw.get('features', [])) or kw.get('compiler') == 'cxx': kw['compile_mode'] = 'cxx' if not 'type' in kw: kw['type'] = 'cprogram' if not 'features' in kw: if not 'header_name' in kw or kw.get('link_header_test', True): kw['features'] = [kw['compile_mode'], kw['type']] # "c ccprogram" else: kw['features'] = [kw['compile_mode']] else: kw['features'] = Utils.to_list(kw['features']) if not 'compile_filename' in kw: kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '') def to_header(dct): if 'header_name' in dct: dct = Utils.to_list(dct['header_name']) return ''.join(['#include <%s>\n' % x for x in dct]) return '' if 'framework_name' in kw: # OSX, not sure this is used anywhere fwkname = kw['framework_name'] if not 'uselib_store' in kw: kw['uselib_store'] = fwkname.upper() if not kw.get('no_header'): fwk = '%s/%s.h' % (fwkname, fwkname) if kw.get('remove_dot_h'): fwk = fwk[:-2] val = kw.get('header_name', []) kw['header_name'] = Utils.to_list(val) + [fwk] kw['msg'] = 'Checking for framework %s' % fwkname kw['framework'] = fwkname elif 'header_name' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for header %s' % kw['header_name'] l = Utils.to_list(kw['header_name']) assert len(l), 'list of headers in header_name is empty' kw['code'] = to_header(kw) + SNIP_EMPTY_PROGRAM if not 'uselib_store' in kw: kw['uselib_store'] = l[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(l[0]) if 'lib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for library %s' % kw['lib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['lib'].upper() if 'stlib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for static library %s' % kw['stlib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['stlib'].upper() if 'fragment' in kw: # an additional code fragment may be provided to replace the predefined code # in custom headers kw['code'] = kw['fragment'] if not 'msg' in kw: kw['msg'] = 'Checking for code snippet' if not 'errmsg' in kw: kw['errmsg'] = 'no' for (flagsname,flagstype) in (('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')): if flagsname in kw: if not 'msg' in kw: kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname]) if not 'errmsg' in kw: kw['errmsg'] = 'no' if not 'execute' in kw: kw['execute'] = False if 
kw['execute']: kw['features'].append('test_exec') kw['chmod'] = Utils.O755 if not 'errmsg' in kw: kw['errmsg'] = 'not found' if not 'okmsg' in kw: kw['okmsg'] = 'yes' if not 'code' in kw: kw['code'] = SNIP_EMPTY_PROGRAM # if there are headers to append automatically to the next tests if self.env[INCKEYS]: kw['code'] = '\n'.join(['#include <%s>' % x for x in self.env[INCKEYS]]) + '\n' + kw['code'] # in case defines lead to very long command-lines if kw.get('merge_config_header') or env.merge_config_header: kw['code'] = '%s\n\n%s' % (self.get_config_header(), kw['code']) env.DEFINES = [] # modify the copy if not kw.get('success'): kw['success'] = None if 'define_name' in kw: self.undefine(kw['define_name']) if not 'msg' in kw: self.fatal('missing "msg" in conf.check(...)') @conf def post_check(self, *k, **kw): """ Sets the variables after a test executed in :py:func:`waflib.Tools.c_config.check` was run successfully """ is_success = 0 if kw['execute']: if kw['success'] is not None: if kw.get('define_ret'): is_success = kw['success'] else: is_success = (kw['success'] == 0) else: is_success = (kw['success'] == 0) if kw.get('define_name'): comment = kw.get('comment', '') define_name = kw['define_name'] if kw['execute'] and kw.get('define_ret') and isinstance(is_success, str): if kw.get('global_define', 1): self.define(define_name, is_success, quote=kw.get('quote', 1), comment=comment) else: if kw.get('quote', 1): succ = '"%s"' % is_success else: succ = int(is_success) val = '%s=%s' % (define_name, succ) var = 'DEFINES_%s' % kw['uselib_store'] self.env.append_value(var, val) else: if kw.get('global_define', 1): self.define_cond(define_name, is_success, comment=comment) else: var = 'DEFINES_%s' % kw['uselib_store'] self.env.append_value(var, '%s=%s' % (define_name, int(is_success))) # define conf.env.HAVE_X to 1 if kw.get('add_have_to_env', 1): if kw.get('uselib_store'): self.env[self.have_define(kw['uselib_store'])] = 1 elif kw['execute'] and kw.get('define_ret'): self.env[define_name] = is_success else: self.env[define_name] = int(is_success) if 'header_name' in kw: if kw.get('auto_add_header_name'): self.env.append_value(INCKEYS, Utils.to_list(kw['header_name'])) if is_success and 'uselib_store' in kw: from waflib.Tools import ccroot # See get_uselib_vars in ccroot.py _vars = set() for x in kw['features']: if x in ccroot.USELIB_VARS: _vars |= ccroot.USELIB_VARS[x] for k in _vars: x = k.lower() if x in kw: self.env.append_value(k + '_' + kw['uselib_store'], kw[x]) return is_success @conf def check(self, *k, **kw): """ Performs a configuration test by calling :py:func:`waflib.Configure.run_build`. For the complete list of parameters, see :py:func:`waflib.Tools.c_config.validate_c`. To force a specific compiler, pass ``compiler='c'`` or ``compiler='cxx'`` to the list of arguments Besides build targets, complete builds can be given through a build function. 
All files will be written to a temporary directory:: def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('#include \\nint lib_func(void) { FILE *f = fopen("foo", "r");}\\n', 'w') bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') conf.check(build_fun=build, msg=msg) """ self.validate_c(kw) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret ret = self.post_check(*k, **kw) if not ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret class test_exec(Task.Task): """ A task that runs programs after they are built. See :py:func:`waflib.Tools.c_config.test_exec_fun`. """ color = 'PINK' def run(self): cmd = [self.inputs[0].abspath()] + getattr(self.generator, 'test_args', []) if getattr(self.generator, 'rpath', None): if getattr(self.generator, 'define_ret', False): self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd) else: self.generator.bld.retval = self.generator.bld.exec_command(cmd) else: env = self.env.env or {} env.update(dict(os.environ)) for var in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'PATH'): env[var] = self.inputs[0].parent.abspath() + os.path.pathsep + env.get(var, '') if getattr(self.generator, 'define_ret', False): self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd, env=env) else: self.generator.bld.retval = self.generator.bld.exec_command(cmd, env=env) @feature('test_exec') @after_method('apply_link') def test_exec_fun(self): """ The feature **test_exec** is used to create a task that will to execute the binary created (link task output) during the build. The exit status will be set on the build context, so only one program may have the feature *test_exec*. This is used by configuration tests:: def configure(conf): conf.check(execute=True) """ self.create_task('test_exec', self.link_task.outputs[0]) @conf def check_cxx(self, *k, **kw): """ Runs a test with a task generator of the form:: conf.check(features='cxx cxxprogram', ...) """ kw['compiler'] = 'cxx' return self.check(*k, **kw) @conf def check_cc(self, *k, **kw): """ Runs a test with a task generator of the form:: conf.check(features='c cprogram', ...) """ kw['compiler'] = 'c' return self.check(*k, **kw) @conf def set_define_comment(self, key, comment): """ Sets a comment that will appear in the configuration header :type key: string :type comment: string """ coms = self.env.DEFINE_COMMENTS if not coms: coms = self.env.DEFINE_COMMENTS = {} coms[key] = comment or '' @conf def get_define_comment(self, key): """ Returns the comment associated to a define :type key: string """ coms = self.env.DEFINE_COMMENTS or {} return coms.get(key, '') @conf def define(self, key, val, quote=True, comment=''): """ Stores a single define and its state into ``conf.env.DEFINES``. The value is cast to an integer (0/1). 
:param key: define name :type key: string :param val: value :type val: int or string :param quote: enclose strings in quotes (yes by default) :type quote: bool """ assert isinstance(key, str) if not key: return if val is True: val = 1 elif val in (False, None): val = 0 if isinstance(val, int) or isinstance(val, float): s = '%s=%s' else: s = quote and '%s="%s"' or '%s=%s' app = s % (key, str(val)) ban = key + '=' lst = self.env.DEFINES for x in lst: if x.startswith(ban): lst[lst.index(x)] = app break else: self.env.append_value('DEFINES', app) self.env.append_unique(DEFKEYS, key) self.set_define_comment(key, comment) @conf def undefine(self, key, comment=''): """ Removes a global define from ``conf.env.DEFINES`` :param key: define name :type key: string """ assert isinstance(key, str) if not key: return ban = key + '=' lst = [x for x in self.env.DEFINES if not x.startswith(ban)] self.env.DEFINES = lst self.env.append_unique(DEFKEYS, key) self.set_define_comment(key, comment) @conf def define_cond(self, key, val, comment=''): """ Conditionally defines a name:: def configure(conf): conf.define_cond('A', True) # equivalent to: # if val: conf.define('A', 1) # else: conf.undefine('A') :param key: define name :type key: string :param val: value :type val: int or string """ assert isinstance(key, str) if not key: return if val: self.define(key, 1, comment=comment) else: self.undefine(key, comment=comment) @conf def is_defined(self, key): """ Indicates whether a particular define is globally set in ``conf.env.DEFINES``. :param key: define name :type key: string :return: True if the define is set :rtype: bool """ assert key and isinstance(key, str) ban = key + '=' for x in self.env.DEFINES: if x.startswith(ban): return True return False @conf def get_define(self, key): """ Returns the value of an existing define, or None if not found :param key: define name :type key: string :rtype: string """ assert key and isinstance(key, str) ban = key + '=' for x in self.env.DEFINES: if x.startswith(ban): return x[len(ban):] return None @conf def have_define(self, key): """ Returns a variable suitable for command-line or header use by removing invalid characters and prefixing it with ``HAVE_`` :param key: define name :type key: string :return: the input key prefixed by *HAVE_* and substitute any invalid characters. :rtype: string """ return (self.env.HAVE_PAT or 'HAVE_%s') % Utils.quote_define_name(key) @conf def write_config_header(self, configfile='', guard='', top=False, defines=True, headers=False, remove=True, define_prefix=''): """ Writes a configuration header containing defines and includes:: def configure(cnf): cnf.define('A', 1) cnf.write_config_header('config.h') This function only adds include guards (if necessary), consult :py:func:`waflib.Tools.c_config.get_config_header` for details on the body. 
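
With the example above, the generated file would look roughly like this (the include
guard is derived from the file name unless *guard* is given)::

	/* WARNING! All changes made to this file will be lost! */
	#ifndef W_CONFIG_H_WAF
	#define W_CONFIG_H_WAF

	#define A 1

	#endif /* W_CONFIG_H_WAF */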
:param configfile: path to the file to create (relative or absolute) :type configfile: string :param guard: include guard name to add, by default it is computed from the file name :type guard: string :param top: write the configuration header from the build directory (default is from the current path) :type top: bool :param defines: add the defines (yes by default) :type defines: bool :param headers: add #include in the file :type headers: bool :param remove: remove the defines after they are added (yes by default, works like in autoconf) :type remove: bool :type define_prefix: string :param define_prefix: prefix all the defines in the file with a particular prefix """ if not configfile: configfile = WAF_CONFIG_H waf_guard = guard or 'W_%s_WAF' % Utils.quote_define_name(configfile) node = top and self.bldnode or self.path.get_bld() node = node.make_node(configfile) node.parent.mkdir() lst = ['/* WARNING! All changes made to this file will be lost! */\n'] lst.append('#ifndef %s\n#define %s\n' % (waf_guard, waf_guard)) lst.append(self.get_config_header(defines, headers, define_prefix=define_prefix)) lst.append('\n#endif /* %s */\n' % waf_guard) node.write('\n'.join(lst)) # config files must not be removed on "waf clean" self.env.append_unique(Build.CFG_FILES, [node.abspath()]) if remove: for key in self.env[DEFKEYS]: self.undefine(key) self.env[DEFKEYS] = [] @conf def get_config_header(self, defines=True, headers=False, define_prefix=''): """ Creates the contents of a ``config.h`` file from the defines and includes set in conf.env.define_key / conf.env.include_key. No include guards are added. A prelude will be added from the variable env.WAF_CONFIG_H_PRELUDE if provided. This can be used to insert complex macros or include guards:: def configure(conf): conf.env.WAF_CONFIG_H_PRELUDE = '#include \\n' conf.write_config_header('config.h') :param defines: write the defines values :type defines: bool :param headers: write include entries for each element in self.env.INCKEYS :type headers: bool :type define_prefix: string :param define_prefix: prefix all the defines with a particular prefix :return: the contents of a ``config.h`` file :rtype: string """ lst = [] if self.env.WAF_CONFIG_H_PRELUDE: lst.append(self.env.WAF_CONFIG_H_PRELUDE) if headers: for x in self.env[INCKEYS]: lst.append('#include <%s>' % x) if defines: tbl = {} for k in self.env.DEFINES: a, _, b = k.partition('=') tbl[a] = b for k in self.env[DEFKEYS]: caption = self.get_define_comment(k) if caption: caption = ' /* %s */' % caption try: txt = '#define %s%s %s%s' % (define_prefix, k, tbl[k], caption) except KeyError: txt = '/* #undef %s%s */%s' % (define_prefix, k, caption) lst.append(txt) return "\n".join(lst) @conf def cc_add_flags(conf): """ Adds CFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CFLAGS', dup=False) @conf def cxx_add_flags(conf): """ Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CXXFLAGS', dup=False) @conf def link_add_flags(conf): """ Adds LINKFLAGS / LDFLAGS from os.environ to conf.env """ conf.add_os_flags('LINKFLAGS', dup=False) conf.add_os_flags('LDFLAGS', dup=False) @conf def cc_load_tools(conf): """ Loads the Waf c extensions """ if not conf.env.DEST_OS: conf.env.DEST_OS = Utils.unversioned_sys_platform() conf.load('c') @conf def cxx_load_tools(conf): """ Loads the Waf c++ extensions """ if not conf.env.DEST_OS: conf.env.DEST_OS = Utils.unversioned_sys_platform() 
conf.load('cxx') @conf def get_cc_version(conf, cc, gcc=False, icc=False, clang=False): """ Runs the preprocessor to determine the gcc/icc/clang version The variables CC_VERSION, DEST_OS, DEST_BINFMT and DEST_CPU will be set in *conf.env* :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-dM', '-E', '-'] env = conf.env.env or None try: out, err = conf.cmd_and_log(cmd, output=0, input='\n'.encode(), env=env) except Errors.WafError: conf.fatal('Could not determine the compiler version %r' % cmd) if gcc: if out.find('__INTEL_COMPILER') >= 0: conf.fatal('The intel compiler pretends to be gcc') if out.find('__GNUC__') < 0 and out.find('__clang__') < 0: conf.fatal('Could not determine the compiler type') if icc and out.find('__INTEL_COMPILER') < 0: conf.fatal('Not icc/icpc') if clang and out.find('__clang__') < 0: conf.fatal('Not clang/clang++') if not clang and out.find('__clang__') >= 0: conf.fatal('Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure') k = {} if icc or gcc or clang: out = out.splitlines() for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k # Some documentation is available at http://predef.sourceforge.net # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns. if not conf.env.DEST_OS: conf.env.DEST_OS = '' for i in MACRO_TO_DESTOS: if isD(i): conf.env.DEST_OS = MACRO_TO_DESTOS[i] break else: if isD('__APPLE__') and isD('__MACH__'): conf.env.DEST_OS = 'darwin' elif isD('__unix__'): # unix must be tested last as it's a generic fallback conf.env.DEST_OS = 'generic' if isD('__ELF__'): conf.env.DEST_BINFMT = 'elf' elif isD('__WINNT__') or isD('__CYGWIN__') or isD('_WIN32'): conf.env.DEST_BINFMT = 'pe' if not conf.env.IMPLIBDIR: conf.env.IMPLIBDIR = conf.env.LIBDIR # for .lib or .dll.a files conf.env.LIBDIR = conf.env.BINDIR elif isD('__APPLE__'): conf.env.DEST_BINFMT = 'mac-o' if not conf.env.DEST_BINFMT: # Infer the binary format from the os name. conf.env.DEST_BINFMT = Utils.destos_to_binfmt(conf.env.DEST_OS) for i in MACRO_TO_DEST_CPU: if isD(i): conf.env.DEST_CPU = MACRO_TO_DEST_CPU[i] break Logs.debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])) if icc: ver = k['__INTEL_COMPILER'] conf.env.CC_VERSION = (ver[:-2], ver[-2], ver[-1]) else: if isD('__clang__') and isD('__clang_major__'): conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) else: # older clang versions and gcc conf.env.CC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k.get('__GNUC_PATCHLEVEL__', '0')) return k @conf def get_xlc_version(conf, cc): """ Returns the Aix compiler version :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-qversion'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find xlc %r' % cmd) # the intention is to catch the 8.0 in "IBM XL C/C++ Enterprise Edition V8.0 for AIX..." 
for v in (r"IBM XL C/C\+\+.* V(?P\d*)\.(?P\d*)",): version_re = re.compile(v, re.I).search match = version_re(out or err) if match: k = match.groupdict() conf.env.CC_VERSION = (k['major'], k['minor']) break else: conf.fatal('Could not determine the XLC version.') @conf def get_suncc_version(conf, cc): """ Returns the Sun compiler version :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-V'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError as e: # Older versions of the compiler exit with non-zero status when reporting their version if not (hasattr(e, 'returncode') and hasattr(e, 'stdout') and hasattr(e, 'stderr')): conf.fatal('Could not find suncc %r' % cmd) out = e.stdout err = e.stderr version = (out or err) version = version.splitlines()[0] # cc: Sun C 5.10 SunOS_i386 2009/06/03 # cc: Studio 12.5 Sun C++ 5.14 SunOS_sparc Beta 2015/11/17 # cc: WorkShop Compilers 5.0 98/12/15 C 5.0 version_re = re.compile(r'cc: (studio.*?|\s+)?(sun\s+(c\+\+|c)|(WorkShop\s+Compilers))?\s+(?P\d*)\.(?P\d*)', re.I).search match = version_re(version) if match: k = match.groupdict() conf.env.CC_VERSION = (k['major'], k['minor']) else: conf.fatal('Could not determine the suncc version.') # ============ the --as-needed flag should added during the configuration, not at runtime ========= @conf def add_as_needed(self): """ Adds ``--as-needed`` to the *LINKFLAGS* On some platforms, it is a default flag. In some cases (e.g., in NS-3) it is necessary to explicitly disable this feature with `-Wl,--no-as-needed` flag. """ if self.env.DEST_BINFMT == 'elf' and 'gcc' in (self.env.CXX_NAME, self.env.CC_NAME): self.env.append_unique('LINKFLAGS', '-Wl,--as-needed') # ============ parallel configuration class cfgtask(Task.Task): """ A task that executes build configuration tests (calls conf.check) Make sure to use locks if concurrent access to the same conf.env data is necessary. 
""" def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.run_after = set() def display(self): return '' def runnable_status(self): for x in self.run_after: if not x.hasrun: return Task.ASK_LATER return Task.RUN_ME def uid(self): return Utils.SIG_NIL def signature(self): return Utils.SIG_NIL def run(self): conf = self.conf bld = Build.BuildContext(top_dir=conf.srcnode.abspath(), out_dir=conf.bldnode.abspath()) bld.env = conf.env bld.init_dirs() bld.in_msg = 1 # suppress top-level start_msg bld.logger = self.logger bld.multicheck_task = self args = self.args try: if 'func' in args: bld.test(build_fun=args['func'], msg=args.get('msg', ''), okmsg=args.get('okmsg', ''), errmsg=args.get('errmsg', ''), ) else: args['multicheck_mandatory'] = args.get('mandatory', True) args['mandatory'] = True try: bld.check(**args) finally: args['mandatory'] = args['multicheck_mandatory'] except Exception: return 1 def process(self): Task.Task.process(self) if 'msg' in self.args: with self.generator.bld.multicheck_lock: self.conf.start_msg(self.args['msg']) if self.hasrun == Task.NOT_RUN: self.conf.end_msg('test cancelled', 'YELLOW') elif self.hasrun != Task.SUCCESS: self.conf.end_msg(self.args.get('errmsg', 'no'), 'YELLOW') else: self.conf.end_msg(self.args.get('okmsg', 'yes'), 'GREEN') @conf def multicheck(self, *k, **kw): """ Runs configuration tests in parallel; results are printed sequentially at the end of the build but each test must provide its own msg value to display a line:: def test_build(ctx): ctx.in_msg = True # suppress console outputs ctx.check_large_file(mandatory=False) conf.multicheck( {'header_name':'stdio.h', 'msg':'... stdio', 'uselib_store':'STDIO', 'global_define':False}, {'header_name':'xyztabcd.h', 'msg':'... optional xyztabcd.h', 'mandatory': False}, {'header_name':'stdlib.h', 'msg':'... stdlib', 'okmsg': 'aye', 'errmsg': 'nope'}, {'func': test_build, 'msg':'... testing an arbitrary build function', 'okmsg':'ok'}, msg = 'Checking for headers in parallel', mandatory = True, # mandatory tests raise an error at the end run_all_tests = True, # try running all tests ) The configuration tests may modify the values in conf.env in any order, and the define values can affect configuration tests being executed. It is hence recommended to provide `uselib_store` values with `global_define=False` to prevent such issues. 
""" self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw) # Force a copy so that threads append to the same list at least # no order is guaranteed, but the values should not disappear at least for var in ('DEFINES', DEFKEYS): self.env.append_value(var, []) self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {} # define a task object that will execute our tests class par(object): def __init__(self): self.keep = False self.task_sigs = {} self.progress_bar = 0 def total(self): return len(tasks) def to_log(self, *k, **kw): return bld = par() bld.keep = kw.get('run_all_tests', True) bld.imp_sigs = {} tasks = [] id_to_task = {} for counter, dct in enumerate(k): x = Task.classes['cfgtask'](bld=bld, env=None) tasks.append(x) x.args = dct x.args['multicheck_counter'] = counter x.bld = bld x.conf = self x.args = dct # bind a logger that will keep the info in memory x.logger = Logs.make_mem_logger(str(id(x)), self.logger) if 'id' in dct: id_to_task[dct['id']] = x # second pass to set dependencies with after_test/before_test for x in tasks: for key in Utils.to_list(x.args.get('before_tests', [])): tsk = id_to_task[key] if not tsk: raise ValueError('No test named %r' % key) tsk.run_after.add(x) for key in Utils.to_list(x.args.get('after_tests', [])): tsk = id_to_task[key] if not tsk: raise ValueError('No test named %r' % key) x.run_after.add(tsk) def it(): yield tasks while 1: yield [] bld.producer = p = Runner.Parallel(bld, Options.options.jobs) bld.multicheck_lock = Utils.threading.Lock() p.biter = it() self.end_msg('started') p.start() # flush the logs in order into the config.log for x in tasks: x.logger.memhandler.flush() self.start_msg('-> processing test results') if p.error: for x in p.error: if getattr(x, 'err_msg', None): self.to_log(x.err_msg) self.end_msg('fail', color='RED') raise Errors.WafError('There is an error in the library, read config.log for more information') failure_count = 0 for x in tasks: if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN): failure_count += 1 if failure_count: self.end_msg(kw.get('errmsg', '%s test failed' % failure_count), color='YELLOW', **kw) else: self.end_msg('all ok', **kw) for x in tasks: if x.hasrun != Task.SUCCESS: if x.args.get('mandatory', True): self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information') @conf def check_gcc_o_space(self, mode='c'): if int(self.env.CC_VERSION[0]) > 4: # this is for old compilers return self.env.stash() if mode == 'c': self.env.CCLNK_TGT_F = ['-o', ''] elif mode == 'cxx': self.env.CXXLNK_TGT_F = ['-o', ''] features = '%s %sshlib' % (mode, mode) try: self.check(msg='Checking if the -o link must be split from arguments', fragment=SNIP_EMPTY_PROGRAM, features=features) except self.errors.ConfigurationError: self.env.revert() else: self.env.commit() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Tools/c_osx.py0000660000000000000000000001332600000000000022177 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2008-2018 (ita) """ MacOSX related tools """ import os, shutil, platform from waflib import Task, Utils from waflib.TaskGen import taskgen_method, feature, after_method, before_method app_info = ''' CFBundlePackageType APPL CFBundleGetInfoString Created by Waf CFBundleSignature ???? 
NOTE THIS IS A GENERATED FILE, DO NOT MODIFY CFBundleExecutable {app_name} ''' """ plist template """ @feature('c', 'cxx') def set_macosx_deployment_target(self): """ see WAF issue 285 and also and also http://trac.macports.org/ticket/17059 """ if self.env.MACOSX_DEPLOYMENT_TARGET: os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ: if Utils.unversioned_sys_platform() == 'darwin': os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2]) @taskgen_method def create_bundle_dirs(self, name, out): """ Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp` """ dir = out.parent.find_or_declare(name) dir.mkdir() macos = dir.find_or_declare(['Contents', 'MacOS']) macos.mkdir() return dir def bundle_name_for_output(out): name = out.name k = name.rfind('.') if k >= 0: name = name[:k] + '.app' else: name = name + '.app' return name @feature('cprogram', 'cxxprogram') @after_method('apply_link') def create_task_macapp(self): """ To compile an executable into a Mac application (a .app), set its *mac_app* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_app=True) To force *all* executables to be transformed into Mac applications:: def build(bld): bld.env.MACAPP = True bld.shlib(source='a.c', target='foo') """ if self.env.MACAPP or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'MacOS', out.name]) self.apptask = self.create_task('macapp', self.link_task.outputs, n1) inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755) if getattr(self, 'mac_files', None): # this only accepts files; they will be installed as seen from mac_files_root mac_files_root = getattr(self, 'mac_files_root', None) if isinstance(mac_files_root, str): mac_files_root = self.path.find_node(mac_files_root) if not mac_files_root: self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root) res_dir = n1.parent.parent.make_node('Resources') inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name for node in self.to_nodes(self.mac_files): relpath = node.path_from(mac_files_root or node.parent) self.create_task('macapp', node, res_dir.make_node(relpath)) self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node) if getattr(self.bld, 'is_install', None): # disable regular binary installation self.install_task.hasrun = Task.SKIP_ME @feature('cprogram', 'cxxprogram') @after_method('apply_link') def create_task_macplist(self): """ Creates a :py:class:`waflib.Tools.c_osx.macplist` instance. 
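
A custom property list can be provided through the *mac_plist* attribute, either as a
file name or as literal plist text; a sketch (``MyInfo.plist`` is hypothetical)::

	def build(bld):
		bld.program(source='a.c', target='foo', mac_app=True, mac_plist='MyInfo.plist')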
""" if self.env.MACAPP or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'Info.plist']) self.plisttask = plisttask = self.create_task('macplist', [], n1) plisttask.context = { 'app_name': self.link_task.outputs[0].name, 'env': self.env } plist_ctx = getattr(self, 'plist_context', None) if (plist_ctx): plisttask.context.update(plist_ctx) if getattr(self, 'mac_plist', False): node = self.path.find_resource(self.mac_plist) if node: plisttask.inputs.append(node) else: plisttask.code = self.mac_plist else: plisttask.code = app_info inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name self.add_install_files(install_to=inst_to, install_from=n1) @feature('cshlib', 'cxxshlib') @before_method('apply_link', 'propagate_uselib_vars') def apply_bundle(self): """ To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_bundle = True) To force *all* executables to be transformed into bundles:: def build(bld): bld.env.MACBUNDLE = True bld.shlib(source='a.c', target='foo') """ if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False): self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN use = self.use = self.to_list(getattr(self, 'use', [])) if not 'MACBUNDLE' in use: use.append('MACBUNDLE') app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources'] class macapp(Task.Task): """ Creates mac applications """ color = 'PINK' def run(self): self.outputs[0].parent.mkdir() shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath()) class macplist(Task.Task): """ Creates plist files """ color = 'PINK' ext_in = ['.bin'] def run(self): if getattr(self, 'code', None): txt = self.code else: txt = self.inputs[0].read() context = getattr(self, 'context', {}) txt = txt.format(**context) self.outputs[0].write(txt) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0172093 tevent-0.11.0/third_party/waf/waflib/Tools/c_preproc.py0000660000000000000000000006606500000000000023050 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ C/C++ preprocessor for finding dependencies Reasons for using the Waf preprocessor by default #. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files) #. Not all compilers provide .d files for obtaining the dependencies (portability) #. A naive file scanner will not catch the constructs such as "#include foo()" #. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything) Regarding the speed concerns: * the preprocessing is performed only when files must be compiled * the macros are evaluated only for #if/#elif/#include * system headers are not scanned by default Now if you do not want the Waf preprocessor, the tool +gccdeps* uses the .d files produced during the compilation to track the dependencies (useful when used with the boost libraries). It only works with gcc >= 4.4 though. 
A dumb preprocessor is also available in the tool *c_dumbpreproc* """ # TODO: more varargs, pragma once import re, string, traceback from waflib import Logs, Utils, Errors class PreprocError(Errors.WafError): pass FILE_CACHE_SIZE = 100000 LINE_CACHE_SIZE = 100000 POPFILE = '-' "Constant representing a special token used in :py:meth:`waflib.Tools.c_preproc.c_parser.start` iteration to switch to a header read previously" recursion_limit = 150 "Limit on the amount of files to read in the dependency scanner" go_absolute = False "Set to True to track headers on files in /usr/include, else absolute paths are ignored (but it becomes very slow)" standard_includes = ['/usr/local/include', '/usr/include'] if Utils.is_win32: standard_includes = [] use_trigraphs = 0 """Apply trigraph rules (False by default)""" # obsolete, do not use strict_quotes = 0 g_optrans = { 'not':'!', 'not_eq':'!', 'and':'&&', 'and_eq':'&=', 'or':'||', 'or_eq':'|=', 'xor':'^', 'xor_eq':'^=', 'bitand':'&', 'bitor':'|', 'compl':'~', } """Operators such as and/or/xor for c++. Set an empty dict to disable.""" # ignore #warning and #error re_lines = re.compile( '^[ \t]*(?:#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) """Match #include lines""" re_mac = re.compile(r"^[a-zA-Z_]\w*") """Match macro definitions""" re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]') """Match macro functions""" re_pragma_once = re.compile(r'^\s*once\s*', re.IGNORECASE) """Match #pragma once statements""" re_nl = re.compile('\\\\\r*\n', re.MULTILINE) """Match newlines""" re_cpp = re.compile(r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE ) """Filter C/C++ comments""" trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')] """Trigraph definitions""" chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39} """Escape characters""" NUM = 'i' """Number token""" OP = 'O' """Operator token""" IDENT = 'T' """Identifier token""" STR = 's' """String token""" CHAR = 'c' """Character token""" tok_types = [NUM, STR, IDENT, OP] """Token types""" exp_types = [ r"""0[xX](?P[a-fA-F0-9]+)(?P[uUlL]*)|L*?'(?P(\\.|[^\\'])+)'|(?P\d+)[Ee](?P[+-]*?\d+)(?P[fFlL]*)|(?P\d*\.\d+)([Ee](?P[+-]*?\d+))?(?P[fFlL]*)|(?P\d+\.\d*)([Ee](?P[+-]*?\d+))?(?P[fFlL]*)|(?P0*)(?P\d+)(?P[uUlL]*)""", r'L?"([^"\\]|\\.)*"', r'[a-zA-Z_]\w*', r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]', ] """Expression types""" re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M) """Match expressions into tokens""" accepted = 'a' """Parser state is *accepted*""" ignored = 'i' """Parser state is *ignored*, for example preprocessor lines in an #if 0 block""" undefined = 'u' """Parser state is *undefined* at the moment""" skipped = 's' """Parser state is *skipped*, for example preprocessor lines in a #elif 0 block""" def repl(m): """Replace function used with :py:attr:`waflib.Tools.c_preproc.re_cpp`""" s = m.group() if s[0] == '/': return ' ' return s prec = {} """ Operator precedence rules required for parsing expressions of the form:: #if 1 && 2 != 0 """ ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ','] for x, syms in enumerate(ops): for u in syms.split(): prec[u] = x def reduce_nums(val_1, val_2, val_op): """ Apply arithmetic rules to compute a result :param val1: input 
parameter :type val1: int or string :param val2: input parameter :type val2: int or string :param val_op: C operator in *+*, */*, *-*, etc :type val_op: string :rtype: int """ #print val_1, val_2, val_op # now perform the operation, make certain a and b are numeric try: a = 0 + val_1 except TypeError: a = int(val_1) try: b = 0 + val_2 except TypeError: b = int(val_2) d = val_op if d == '%': c = a % b elif d=='+': c = a + b elif d=='-': c = a - b elif d=='*': c = a * b elif d=='/': c = a / b elif d=='^': c = a ^ b elif d=='==': c = int(a == b) elif d=='|' or d == 'bitor': c = a | b elif d=='||' or d == 'or' : c = int(a or b) elif d=='&' or d == 'bitand': c = a & b elif d=='&&' or d == 'and': c = int(a and b) elif d=='!=' or d == 'not_eq': c = int(a != b) elif d=='^' or d == 'xor': c = int(a^b) elif d=='<=': c = int(a <= b) elif d=='<': c = int(a < b) elif d=='>': c = int(a > b) elif d=='>=': c = int(a >= b) elif d=='<<': c = a << b elif d=='>>': c = a >> b else: c = 0 return c def get_num(lst): """ Try to obtain a number from a list of tokens. The token types are defined in :py:attr:`waflib.Tools.ccroot.tok_types`. :param lst: list of preprocessor tokens :type lst: list of tuple (tokentype, value) :return: a pair containing the number and the rest of the list :rtype: tuple(value, list) """ if not lst: raise PreprocError('empty list for get_num') (p, v) = lst[0] if p == OP: if v == '(': count_par = 1 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 if count_par == 0: break elif v == '(': count_par += 1 i += 1 else: raise PreprocError('rparen expected %r' % lst) (num, _) = get_term(lst[1:i]) return (num, lst[i+1:]) elif v == '+': return get_num(lst[1:]) elif v == '-': num, lst = get_num(lst[1:]) return (reduce_nums('-1', num, '*'), lst) elif v == '!': num, lst = get_num(lst[1:]) return (int(not int(num)), lst) elif v == '~': num, lst = get_num(lst[1:]) return (~ int(num), lst) else: raise PreprocError('Invalid op token %r for get_num' % lst) elif p == NUM: return v, lst[1:] elif p == IDENT: # all macros should have been replaced, remaining identifiers eval to 0 return 0, lst[1:] else: raise PreprocError('Invalid token %r for get_num' % lst) def get_term(lst): """ Evaluate an expression recursively, for example:: 1+1+1 -> 2+1 -> 3 :param lst: list of tokens :type lst: list of tuple(token, value) :return: the value and the remaining tokens :rtype: value, list """ if not lst: raise PreprocError('empty list for get_term') num, lst = get_num(lst) if not lst: return (num, []) (p, v) = lst[0] if p == OP: if v == ',': # skip return get_term(lst[1:]) elif v == '?': count_par = 0 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 elif v == '(': count_par += 1 elif v == ':': if count_par == 0: break i += 1 else: raise PreprocError('rparen expected %r' % lst) if int(num): return get_term(lst[1:i]) else: return get_term(lst[i+1:]) else: num2, lst = get_num(lst[1:]) if not lst: # no more tokens to process num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) # operator precedence p2, v2 = lst[0] if p2 != OP: raise PreprocError('op expected %r' % lst) if prec[v2] >= prec[v]: num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) else: num3, lst = get_num(lst[1:]) num3 = reduce_nums(num2, num3, v2) return get_term([(NUM, num), (p, v), (NUM, num3)] + lst) raise PreprocError('cannot reduce %r' % lst) def reduce_eval(lst): """ Take a list of tokens and output true or false for #if/#elif conditions. 
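# Illustrative sketch: how #if/#elif expressions are evaluated with the helpers
# above (assumes waflib is importable; the results noted in comments follow
# from get_term/reduce_nums as defined in this module).
from waflib.Tools.c_preproc import tokenize, reduce_eval, eval_macro, NUM

toks = tokenize('1 && 2 != 0')
print(reduce_eval(toks) == (NUM, 1))                    # True: the condition holds

defs = {'FOO': 'FOO 1'}                                 # as stored by the 'define' handler
print(eval_macro(tokenize('defined(FOO) && FOO'), dict(defs)))   # True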
:param lst: a list of tokens :type lst: list of tuple(token, value) :return: a token :rtype: tuple(NUM, int) """ num, lst = get_term(lst) return (NUM, num) def stringize(lst): """ Merge a list of tokens into a string :param lst: a list of tokens :type lst: list of tuple(token, value) :rtype: string """ lst = [str(v2) for (p2, v2) in lst] return "".join(lst) def paste_tokens(t1, t2): """ Token pasting works between identifiers, particular operators, and identifiers and numbers:: a ## b -> ab > ## = -> >= a ## 2 -> a2 :param t1: token :type t1: tuple(type, value) :param t2: token :type t2: tuple(type, value) """ p1 = None if t1[0] == OP and t2[0] == OP: p1 = OP elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM): p1 = IDENT elif t1[0] == NUM and t2[0] == NUM: p1 = NUM if not p1: raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2)) return (p1, t1[1] + t2[1]) def reduce_tokens(lst, defs, ban=[]): """ Replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied :param lst: list of tokens :type lst: list of tuple(token, value) :param defs: macro definitions :type defs: dict :param ban: macros that cannot be substituted (recursion is not allowed) :type ban: list of string :return: the new list of tokens :rtype: value, list """ i = 0 while i < len(lst): (p, v) = lst[i] if p == IDENT and v == "defined": del lst[i] if i < len(lst): (p2, v2) = lst[i] if p2 == IDENT: if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) elif p2 == OP and v2 == '(': del lst[i] (p2, v2) = lst[i] del lst[i] # remove the ident, and change the ) for the value if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) else: raise PreprocError('Invalid define expression %r' % lst) elif p == IDENT and v in defs: if isinstance(defs[v], str): a, b = extract_macro(defs[v]) defs[v] = b macro_def = defs[v] to_add = macro_def[1] if isinstance(macro_def[0], list): # macro without arguments del lst[i] accu = to_add[:] reduce_tokens(accu, defs, ban+[v]) for tmp in accu: lst.insert(i, tmp) i += 1 else: # collect the arguments for the funcall args = [] del lst[i] if i >= len(lst): raise PreprocError('expected ( after %r (got nothing)' % v) (p2, v2) = lst[i] if p2 != OP or v2 != '(': raise PreprocError('expected ( after %r' % v) del lst[i] one_param = [] count_paren = 0 while i < len(lst): p2, v2 = lst[i] del lst[i] if p2 == OP and count_paren == 0: if v2 == '(': one_param.append((p2, v2)) count_paren += 1 elif v2 == ')': if one_param: args.append(one_param) break elif v2 == ',': if not one_param: raise PreprocError('empty param in funcall %r' % v) args.append(one_param) one_param = [] else: one_param.append((p2, v2)) else: one_param.append((p2, v2)) if v2 == '(': count_paren += 1 elif v2 == ')': count_paren -= 1 else: raise PreprocError('malformed macro') # substitute the arguments within the define expression accu = [] arg_table = macro_def[0] j = 0 while j < len(to_add): (p2, v2) = to_add[j] if p2 == OP and v2 == '#': # stringize is for arguments only if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] accu.append((STR, stringize(toks))) j += 1 else: accu.append((p2, v2)) elif p2 == OP and v2 == '##': # token pasting, how can man invent such a complicated system? 
if accu and j+1 < len(to_add): # we have at least two tokens t1 = accu[-1] if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] if toks: accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1]) accu.extend(toks[1:]) else: # error, case "a##" accu.append((p2, v2)) accu.extend(toks) elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__': # first collect the tokens va_toks = [] st = len(macro_def[0]) pt = len(args) for x in args[pt-st+1:]: va_toks.extend(x) va_toks.append((OP, ',')) if va_toks: va_toks.pop() # extra comma if len(accu)>1: (p3, v3) = accu[-1] (p4, v4) = accu[-2] if v3 == '##': # remove the token paste accu.pop() if v4 == ',' and pt < st: # remove the comma accu.pop() accu += va_toks else: accu[-1] = paste_tokens(t1, to_add[j+1]) j += 1 else: # Invalid paste, case "##a" or "b##" accu.append((p2, v2)) elif p2 == IDENT and v2 in arg_table: toks = args[arg_table[v2]] reduce_tokens(toks, defs, ban+[v]) accu.extend(toks) else: accu.append((p2, v2)) j += 1 reduce_tokens(accu, defs, ban+[v]) for x in range(len(accu)-1, -1, -1): lst.insert(i, accu[x]) i += 1 def eval_macro(lst, defs): """ Reduce the tokens by :py:func:`waflib.Tools.c_preproc.reduce_tokens` and try to return a 0/1 result by :py:func:`waflib.Tools.c_preproc.reduce_eval`. :param lst: list of tokens :type lst: list of tuple(token, value) :param defs: macro definitions :type defs: dict :rtype: int """ reduce_tokens(lst, defs, []) if not lst: raise PreprocError('missing tokens to evaluate') if lst: p, v = lst[0] if p == IDENT and v not in defs: raise PreprocError('missing macro %r' % lst) p, v = reduce_eval(lst) return int(v) != 0 def extract_macro(txt): """ Process a macro definition of the form:: #define f(x, y) x * y into a function or a simple macro without arguments :param txt: expression to exact a macro definition from :type txt: string :return: a tuple containing the name, the list of arguments and the replacement :rtype: tuple(string, [list, list]) """ t = tokenize(txt) if re_fun.search(txt): p, name = t[0] p, v = t[1] if p != OP: raise PreprocError('expected (') i = 1 pindex = 0 params = {} prev = '(' while 1: i += 1 p, v = t[i] if prev == '(': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == ')': break else: raise PreprocError('unexpected token (3)') elif prev == IDENT: if p == OP and v == ',': prev = v elif p == OP and v == ')': break else: raise PreprocError('comma or ... expected') elif prev == ',': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == '...': raise PreprocError('not implemented (1)') else: raise PreprocError('comma or ... 
expected (2)') elif prev == '...': raise PreprocError('not implemented (2)') else: raise PreprocError('unexpected else') #~ print (name, [params, t[i+1:]]) return (name, [params, t[i+1:]]) else: (p, v) = t[0] if len(t) > 1: return (v, [[], t[1:]]) else: # empty define, assign an empty token return (v, [[], [('T','')]]) re_include = re.compile(r'^\s*(<(?:.*)>|"(?:.*)")') def extract_include(txt, defs): """ Process a line in the form:: #include foo :param txt: include line to process :type txt: string :param defs: macro definitions :type defs: dict :return: the file name :rtype: string """ m = re_include.search(txt) if m: txt = m.group(1) return txt[0], txt[1:-1] # perform preprocessing and look at the result, it must match an include toks = tokenize(txt) reduce_tokens(toks, defs, ['waf_include']) if not toks: raise PreprocError('could not parse include %r' % txt) if len(toks) == 1: if toks[0][0] == STR: return '"', toks[0][1] else: if toks[0][1] == '<' and toks[-1][1] == '>': ret = '<', stringize(toks).lstrip('<').rstrip('>') return ret raise PreprocError('could not parse include %r' % txt) def parse_char(txt): """ Parse a c character :param txt: character to parse :type txt: string :return: a character literal :rtype: string """ if not txt: raise PreprocError('attempted to parse a null char') if txt[0] != '\\': return ord(txt) c = txt[1] if c == 'x': if len(txt) == 4 and txt[3] in string.hexdigits: return int(txt[2:], 16) return int(txt[2:], 16) elif c.isdigit(): if c == '0' and len(txt)==2: return 0 for i in 3, 2, 1: if len(txt) > i and txt[1:1+i].isdigit(): return (1+i, int(txt[1:1+i], 8)) else: try: return chr_esc[c] except KeyError: raise PreprocError('could not parse char literal %r' % txt) def tokenize(s): """ Convert a string into a list of tokens (shlex.split does not apply to c/c++/d) :param s: input to tokenize :type s: string :return: a list of tokens :rtype: list of tuple(token, value) """ return tokenize_private(s)[:] # force a copy of the results def tokenize_private(s): ret = [] for match in re_clexer.finditer(s): m = match.group for name in tok_types: v = m(name) if v: if name == IDENT: if v in g_optrans: name = OP elif v.lower() == "true": v = 1 name = NUM elif v.lower() == "false": v = 0 name = NUM elif name == NUM: if m('oct'): v = int(v, 8) elif m('hex'): v = int(m('hex'), 16) elif m('n0'): v = m('n0') else: v = m('char') if v: v = parse_char(v) else: v = m('n2') or m('n4') elif name == OP: if v == '%:': v = '#' elif v == '%:%:': v = '##' elif name == STR: # remove the quotes around the string v = v[1:-1] ret.append((name, v)) break return ret def format_defines(lst): ret = [] for y in lst: if y: pos = y.find('=') if pos == -1: # "-DFOO" should give "#define FOO 1" ret.append(y) elif pos > 0: # all others are assumed to be -DX=Y ret.append('%s %s' % (y[:pos], y[pos+1:])) else: raise ValueError('Invalid define expression %r' % y) return ret class c_parser(object): """ Used by :py:func:`waflib.Tools.c_preproc.scan` to parse c/h files. Note that by default, only project headers are parsed. 
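# Illustrative sketch: how include lines and command-line defines are normalised
# by the helpers above (assumes waflib is importable; the leading '#include'
# keyword has already been stripped by re_lines before these are called).
from waflib.Tools.c_preproc import extract_include, format_defines

print(extract_include(' <stdio.h>', {}))        # ('<', 'stdio.h')
print(extract_include(' "config.h"', {}))       # ('"', 'config.h')
print(format_defines(['DEBUG', 'VERSION=3']))   # ['DEBUG', 'VERSION 3']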
""" def __init__(self, nodepaths=None, defines=None): self.lines = [] """list of lines read""" if defines is None: self.defs = {} else: self.defs = dict(defines) # make a copy self.state = [] self.count_files = 0 self.currentnode_stack = [] self.nodepaths = nodepaths or [] """Include paths""" self.nodes = [] """List of :py:class:`waflib.Node.Node` found so far""" self.names = [] """List of file names that could not be matched by any file""" self.curfile = '' """Current file""" self.ban_includes = set() """Includes that must not be read (#pragma once)""" self.listed = set() """Include nodes/names already listed to avoid duplicates in self.nodes/self.names""" def cached_find_resource(self, node, filename): """ Find a file from the input directory :param node: directory :type node: :py:class:`waflib.Node.Node` :param filename: header to find :type filename: string :return: the node if found, or None :rtype: :py:class:`waflib.Node.Node` """ try: cache = node.ctx.preproc_cache_node except AttributeError: cache = node.ctx.preproc_cache_node = Utils.lru_cache(FILE_CACHE_SIZE) key = (node, filename) try: return cache[key] except KeyError: ret = node.find_resource(filename) if ret: if getattr(ret, 'children', None): ret = None elif ret.is_child_of(node.ctx.bldnode): tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode)) if tmp and getattr(tmp, 'children', None): ret = None cache[key] = ret return ret def tryfind(self, filename, kind='"', env=None): """ Try to obtain a node from the filename based from the include paths. Will add the node found to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` or the file name to :py:attr:`waflib.Tools.c_preproc.c_parser.names` if no corresponding file is found. Called by :py:attr:`waflib.Tools.c_preproc.c_parser.start`. :param filename: header to find :type filename: string :return: the node if found :rtype: :py:class:`waflib.Node.Node` """ if filename.endswith('.moc'): # we could let the qt4 module use a subclass, but then the function "scan" below must be duplicated # in the qt4 and in the qt5 classes. So we have two lines here and it is sufficient. self.names.append(filename) return None self.curfile = filename found = None if kind == '"': if env.MSVC_VERSION: for n in reversed(self.currentnode_stack): found = self.cached_find_resource(n, filename) if found: break else: found = self.cached_find_resource(self.currentnode_stack[-1], filename) if not found: for n in self.nodepaths: found = self.cached_find_resource(n, filename) if found: break listed = self.listed if found and not found in self.ban_includes: if found not in listed: listed.add(found) self.nodes.append(found) self.addlines(found) else: if filename not in listed: listed.add(filename) self.names.append(filename) return found def filter_comments(self, node): """ Filter the comments from a c/h file, and return the preprocessor lines. The regexps :py:attr:`waflib.Tools.c_preproc.re_cpp`, :py:attr:`waflib.Tools.c_preproc.re_nl` and :py:attr:`waflib.Tools.c_preproc.re_lines` are used internally. 
:return: the preprocessor directives as a list of (keyword, line) :rtype: a list of string pairs """ # return a list of tuples : keyword, line code = node.read() if use_trigraphs: for (a, b) in trig_def: code = code.split(a).join(b) code = re_nl.sub('', code) code = re_cpp.sub(repl, code) return re_lines.findall(code) def parse_lines(self, node): try: cache = node.ctx.preproc_cache_lines except AttributeError: cache = node.ctx.preproc_cache_lines = Utils.lru_cache(LINE_CACHE_SIZE) try: return cache[node] except KeyError: cache[node] = lines = self.filter_comments(node) lines.append((POPFILE, '')) lines.reverse() return lines def addlines(self, node): """ Add the lines from a header in the list of preprocessor lines to parse :param node: header :type node: :py:class:`waflib.Node.Node` """ self.currentnode_stack.append(node.parent) self.count_files += 1 if self.count_files > recursion_limit: # issue #812 raise PreprocError('recursion limit exceeded') if Logs.verbose: Logs.debug('preproc: reading file %r', node) try: lines = self.parse_lines(node) except EnvironmentError: raise PreprocError('could not read the file %r' % node) except Exception: if Logs.verbose > 0: Logs.error('parsing %r failed %s', node, traceback.format_exc()) else: self.lines.extend(lines) def start(self, node, env): """ Preprocess a source file to obtain the dependencies, which are accumulated to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` and :py:attr:`waflib.Tools.c_preproc.c_parser.names`. :param node: source file :type node: :py:class:`waflib.Node.Node` :param env: config set containing additional defines to take into account :type env: :py:class:`waflib.ConfigSet.ConfigSet` """ Logs.debug('preproc: scanning %s (in %s)', node.name, node.parent.name) self.current_file = node self.addlines(node) # macros may be defined on the command-line, so they must be parsed as if they were part of the file if env.DEFINES: lst = format_defines(env.DEFINES) lst.reverse() self.lines.extend([('define', x) for x in lst]) while self.lines: (token, line) = self.lines.pop() if token == POPFILE: self.count_files -= 1 self.currentnode_stack.pop() continue try: state = self.state # make certain we define the state if we are about to enter in an if block if token[:2] == 'if': state.append(undefined) elif token == 'endif': state.pop() # skip lines when in a dead 'if' branch, wait for the endif if token[0] != 'e': if skipped in self.state or ignored in self.state: continue if token == 'if': ret = eval_macro(tokenize(line), self.defs) if ret: state[-1] = accepted else: state[-1] = ignored elif token == 'ifdef': m = re_mac.match(line) if m and m.group() in self.defs: state[-1] = accepted else: state[-1] = ignored elif token == 'ifndef': m = re_mac.match(line) if m and m.group() in self.defs: state[-1] = ignored else: state[-1] = accepted elif token == 'include' or token == 'import': (kind, inc) = extract_include(line, self.defs) self.current_file = self.tryfind(inc, kind, env) if token == 'import': self.ban_includes.add(self.current_file) elif token == 'elif': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: if eval_macro(tokenize(line), self.defs): state[-1] = accepted elif token == 'else': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: state[-1] = accepted elif token == 'define': try: self.defs[self.define_name(line)] = line except AttributeError: raise PreprocError('Invalid define line %r' % line) elif token == 'undef': m = re_mac.match(line) if m and m.group() in self.defs: 
self.defs.__delitem__(m.group()) #print "undef %s" % name elif token == 'pragma': if re_pragma_once.match(line.lower()): self.ban_includes.add(self.current_file) except Exception as e: if Logs.verbose: Logs.debug('preproc: line parsing failed (%s): %s %s', e, line, traceback.format_exc()) def define_name(self, line): """ :param line: define line :type line: string :rtype: string :return: the define name """ return re_mac.match(line).group() def scan(task): """ Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind:: #include some_macro() This function is bound as a task method on :py:class:`waflib.Tools.c.c` and :py:class:`waflib.Tools.cxx.cxx` for example """ try: incn = task.generator.includes_nodes except AttributeError: raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator) if go_absolute: nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes] else: nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] tmp = c_parser(nodepaths) tmp.start(task.inputs[0], task.env) return (tmp.nodes, tmp.names) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/c_tests.py0000660000000000000000000001375300000000000022534 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) """ Various configuration tests. """ from waflib import Task from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method LIB_CODE = ''' #ifdef _MSC_VER #define testEXPORT __declspec(dllexport) #else #define testEXPORT #endif testEXPORT int lib_func(void) { return 9; } ''' MAIN_CODE = ''' #ifdef _MSC_VER #define testEXPORT __declspec(dllimport) #else #define testEXPORT #endif testEXPORT int lib_func(void); int main(int argc, char **argv) { (void)argc; (void)argv; return !(lib_func() == 9); } ''' @feature('link_lib_test') @before_method('process_source') def link_lib_test_fun(self): """ The configuration test :py:func:`waflib.Configure.run_build` declares a unique task generator, so we need to create other task generators from here to check if the linker is able to link libraries. """ def write_test_file(task): task.outputs[0].write(task.generator.code) rpath = [] if getattr(self, 'add_rpath', False): rpath = [self.bld.path.get_bld().abspath()] mode = self.mode m = '%s %s' % (mode, mode) ex = self.test_exec and 'test_exec' or '' bld = self.bld bld(rule=write_test_file, target='test.' + mode, code=LIB_CODE) bld(rule=write_test_file, target='main.' + mode, code=MAIN_CODE) bld(features='%sshlib' % m, source='test.' + mode, target='test') bld(features='%sprogram %s' % (m, ex), source='main.' + mode, target='app', use='test', rpath=rpath) @conf def check_library(self, mode=None, test_exec=True): """ Checks if libraries can be linked with the current linker. Uses :py:func:`waflib.Tools.c_tests.link_lib_test_fun`. 
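# Illustrative sketch: invoking the link test above from a wscript configure
# function (assumes a C compiler is available; this is not a definitive recipe).
def configure(conf):
	conf.load('compiler_c')
	conf.check_library()   # builds a small shared library and links/runs a program against it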
:param mode: c or cxx or d :type mode: string """ if not mode: mode = 'c' if self.env.CXX: mode = 'cxx' self.check( compile_filename = [], features = 'link_lib_test', msg = 'Checking for libraries', mode = mode, test_exec = test_exec) ######################################################################################## INLINE_CODE = ''' typedef int foo_t; static %s foo_t static_foo () {return 0; } %s foo_t foo () { return 0; } ''' INLINE_VALUES = ['inline', '__inline__', '__inline'] @conf def check_inline(self, **kw): """ Checks for the right value for inline macro. Define INLINE_MACRO to 1 if the define is found. If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__) :param define_name: define INLINE_MACRO by default to 1 if the macro is defined :type define_name: string :param features: by default *c* or *cxx* depending on the compiler present :type features: list of string """ self.start_msg('Checking for inline') if not 'define_name' in kw: kw['define_name'] = 'INLINE_MACRO' if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx'] else: kw['features'] = ['c'] for x in INLINE_VALUES: kw['fragment'] = INLINE_CODE % (x, x) try: self.check(**kw) except self.errors.ConfigurationError: continue else: self.end_msg(x) if x != 'inline': self.define('inline', x, quote=False) return x self.fatal('could not use inline functions') ######################################################################################## LARGE_FRAGMENT = '''#include int main(int argc, char **argv) { (void)argc; (void)argv; return !(sizeof(off_t) >= 8); } ''' @conf def check_large_file(self, **kw): """ Checks for large file support and define the macro HAVE_LARGEFILE The test is skipped on win32 systems (DEST_BINFMT == pe). 
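# Illustrative sketch: the inline and large-file checks above as used from a
# configure function (the define names shown are the documented defaults).
def configure(conf):
	conf.load('compiler_c')
	conf.check_inline()        # defines INLINE_MACRO; may also '#define inline __inline__'
	conf.check_large_file()    # defines HAVE_LARGEFILE and possibly _FILE_OFFSET_BITS=64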
:param define_name: define to set, by default *HAVE_LARGEFILE* :type define_name: string :param execute: execute the test (yes by default) :type execute: bool """ if not 'define_name' in kw: kw['define_name'] = 'HAVE_LARGEFILE' if not 'execute' in kw: kw['execute'] = True if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx', 'cxxprogram'] else: kw['features'] = ['c', 'cprogram'] kw['fragment'] = LARGE_FRAGMENT kw['msg'] = 'Checking for large file support' ret = True try: if self.env.DEST_BINFMT != 'pe': ret = self.check(**kw) except self.errors.ConfigurationError: pass else: if ret: return True kw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64' kw['defines'] = ['_FILE_OFFSET_BITS=64'] try: ret = self.check(**kw) except self.errors.ConfigurationError: pass else: self.define('_FILE_OFFSET_BITS', 64) return ret self.fatal('There is no support for large files') ######################################################################################## ENDIAN_FRAGMENT = ''' #ifdef _MSC_VER #define testshlib_EXPORT __declspec(dllexport) #else #define testshlib_EXPORT #endif short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int testshlib_EXPORT use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; ''' class grep_for_endianness(Task.Task): """ Task that reads a binary and tries to determine the endianness """ color = 'PINK' def run(self): txt = self.inputs[0].read(flags='rb').decode('latin-1') if txt.find('LiTTleEnDian') > -1: self.generator.tmp.append('little') elif txt.find('BIGenDianSyS') > -1: self.generator.tmp.append('big') else: return -1 @feature('grep_for_endianness') @after_method('apply_link') def grep_for_endianness_fun(self): """ Used by the endianness configuration test """ self.create_task('grep_for_endianness', self.link_task.outputs[0]) @conf def check_endianness(self): """ Executes a configuration test to determine the endianness """ tmp = [] def check_msg(self): return tmp[0] self.check(fragment=ENDIAN_FRAGMENT, features='c cshlib grep_for_endianness', msg='Checking for endianness', define='ENDIANNESS', tmp=tmp, okmsg=check_msg, confcache=None) return tmp[0] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/ccroot.py0000660000000000000000000006322400000000000022357 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Classes and methods shared by tools providing support for C-like language such as C/C++/D/Assembly/Go (this support module is almost never used alone). """ import os, re from waflib import Task, Utils, Node, Errors, Logs from waflib.TaskGen import after_method, before_method, feature, taskgen_method, extension from waflib.Tools import c_aliases, c_preproc, c_config, c_osx, c_tests from waflib.Configure import conf SYSTEM_LIB_PATHS = ['/usr/lib64', '/usr/lib', '/usr/local/lib64', '/usr/local/lib'] USELIB_VARS = Utils.defaultdict(set) """ Mapping for features to :py:class:`waflib.ConfigSet.ConfigSet` variables. See :py:func:`waflib.Tools.ccroot.propagate_uselib_vars`. 
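# Illustrative sketch: the endianness configuration test above, called from
# configure() (assumes a C compiler is configured; WORDS_BIGENDIAN is just an
# example define name, not mandated by the test).
def configure(conf):
	conf.load('compiler_c')
	endianness = conf.check_endianness()                      # 'little' or 'big'
	conf.define('WORDS_BIGENDIAN', int(endianness == 'big'))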
""" USELIB_VARS['c'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CCDEPS', 'CFLAGS', 'ARCH']) USELIB_VARS['cxx'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CXXDEPS', 'CXXFLAGS', 'ARCH']) USELIB_VARS['d'] = set(['INCLUDES', 'DFLAGS']) USELIB_VARS['includes'] = set(['INCLUDES', 'FRAMEWORKPATH', 'ARCH']) USELIB_VARS['cprogram'] = USELIB_VARS['cxxprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cshlib'] = USELIB_VARS['cxxshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cstlib'] = USELIB_VARS['cxxstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['dprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['asm'] = set(['ASFLAGS']) # ================================================================================================= @taskgen_method def create_compiled_task(self, name, node): """ Create the compilation task: c, cxx, asm, etc. The output node is created automatically (object file with a typical **.o** extension). The task is appended to the list *compiled_tasks* which is then used by :py:func:`waflib.Tools.ccroot.apply_link` :param name: name of the task class :type name: string :param node: the file to compile :type node: :py:class:`waflib.Node.Node` :return: The task created :rtype: :py:class:`waflib.Task.Task` """ out = '%s.%d.o' % (node.name, self.idx) task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @taskgen_method def to_incnodes(self, inlst): """ Task generator method provided to convert a list of string/nodes into a list of includes folders. The paths are assumed to be relative to the task generator path, except if they begin by **#** in which case they are searched from the top-level directory (``bld.srcnode``). The folders are simply assumed to be existing. The node objects in the list are returned in the output list. The strings are converted into node objects if possible. The node is searched from the source directory, and if a match is found, the equivalent build directory is created and added to the returned list too. When a folder cannot be found, it is ignored. 
:param inlst: list of folders :type inlst: space-delimited string or a list of string/nodes :rtype: list of :py:class:`waflib.Node.Node` :return: list of include folders as nodes """ lst = [] seen = set() for x in self.to_list(inlst): if x in seen or not x: continue seen.add(x) # with a real lot of targets, it is sometimes interesting to cache the results below if isinstance(x, Node.Node): lst.append(x) else: if os.path.isabs(x): lst.append(self.bld.root.make_node(x) or x) else: if x[0] == '#': p = self.bld.bldnode.make_node(x[1:]) v = self.bld.srcnode.make_node(x[1:]) else: p = self.path.get_bld().make_node(x) v = self.path.make_node(x) if p.is_child_of(self.bld.bldnode): p.mkdir() lst.append(p) lst.append(v) return lst @feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): """ Task generator method that processes the attribute *includes*:: tg = bld(features='includes', includes='.') The folders only need to be relative to the current directory, the equivalent build directory is added automatically (for headers created in the build directory). This enables using a build directory or not (``top == out``). This method will add a list of nodes read by :py:func:`waflib.Tools.ccroot.to_incnodes` in ``tg.env.INCPATHS``, and the list of include paths in ``tg.env.INCLUDES``. """ lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env.INCLUDES) self.includes_nodes = lst cwd = self.get_cwd() self.env.INCPATHS = [x.path_from(cwd) for x in lst] class link_task(Task.Task): """ Base class for all link tasks. A task generator is supposed to have at most one link task bound in the attribute *link_task*. See :py:func:`waflib.Tools.ccroot.apply_link`. .. inheritance-diagram:: waflib.Tools.ccroot.stlink_task waflib.Tools.c.cprogram waflib.Tools.c.cshlib waflib.Tools.cxx.cxxstlib waflib.Tools.cxx.cxxprogram waflib.Tools.cxx.cxxshlib waflib.Tools.d.dprogram waflib.Tools.d.dshlib waflib.Tools.d.dstlib waflib.Tools.ccroot.fake_shlib waflib.Tools.ccroot.fake_stlib waflib.Tools.asm.asmprogram waflib.Tools.asm.asmshlib waflib.Tools.asm.asmstlib """ color = 'YELLOW' weight = 3 """Try to process link tasks as early as possible""" inst_to = None """Default installation path for the link task outputs, or None to disable""" chmod = Utils.O755 """Default installation mode for the link task outputs""" def add_target(self, target): """ Process the *target* attribute to add the platform-specific prefix/suffix such as *.so* or *.exe*. The settings are retrieved from ``env.clsname_PATTERN`` """ if isinstance(target, str): base = self.generator.path if target.startswith('#'): # for those who like flat structures target = target[1:] base = self.generator.bld.bldnode pattern = self.env[self.__class__.__name__ + '_PATTERN'] if not pattern: pattern = '%s' folder, name = os.path.split(target) if self.__class__.__name__.find('shlib') > 0 and getattr(self.generator, 'vnum', None): nums = self.generator.vnum.split('.') if self.env.DEST_BINFMT == 'pe': # include the version in the dll file name, # the import lib file name stays unversioned. 
name = name + '-' + nums[0] elif self.env.DEST_OS == 'openbsd': pattern = '%s.%s' % (pattern, nums[0]) if len(nums) >= 2: pattern += '.%s' % nums[1] if folder: tmp = folder + os.sep + pattern % name else: tmp = pattern % name target = base.find_or_declare(tmp) self.set_outputs(target) def exec_command(self, *k, **kw): ret = super(link_task, self).exec_command(*k, **kw) if not ret and self.env.DO_MANIFEST: ret = self.exec_mf() return ret def exec_mf(self): """ Create manifest files for VS-like compilers (msvc, ifort, ...) """ if not self.env.MT: return 0 manifest = None for out_node in self.outputs: if out_node.name.endswith('.manifest'): manifest = out_node.abspath() break else: # Should never get here. If we do, it means the manifest file was # never added to the outputs list, thus we don't have a manifest file # to embed, so we just return. return 0 # embedding mode. Different for EXE's and DLL's. # see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx mode = '' for x in Utils.to_list(self.generator.features): if x in ('cprogram', 'cxxprogram', 'fcprogram', 'fcprogram_test'): mode = 1 elif x in ('cshlib', 'cxxshlib', 'fcshlib'): mode = 2 Logs.debug('msvc: embedding manifest in mode %r', mode) lst = [] + self.env.MT lst.extend(Utils.to_list(self.env.MTFLAGS)) lst.extend(['-manifest', manifest]) lst.append('-outputresource:%s;%s' % (self.outputs[0].abspath(), mode)) return super(link_task, self).exec_command(lst) class stlink_task(link_task): """ Base for static link tasks, which use *ar* most of the time. The target is always removed before being written. """ run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}' chmod = Utils.O644 """Default installation mode for the static libraries""" def rm_tgt(cls): old = cls.run def wrap(self): try: os.remove(self.outputs[0].abspath()) except OSError: pass return old(self) setattr(cls, 'run', wrap) rm_tgt(stlink_task) @feature('skip_stlib_link_deps') @before_method('process_use') def apply_skip_stlib_link_deps(self): """ This enables an optimization in the :py:func:wafilb.Tools.ccroot.processes_use: method that skips dependency and link flag optimizations for targets that generate static libraries (via the :py:class:Tools.ccroot.stlink_task task). The actual behavior is implemented in :py:func:wafilb.Tools.ccroot.processes_use: method so this feature only tells waf to enable the new behavior. """ self.env.SKIP_STLIB_LINK_DEPS = True @feature('c', 'cxx', 'd', 'fc', 'asm') @after_method('process_source') def apply_link(self): """ Collect the tasks stored in ``compiled_tasks`` (created by :py:func:`waflib.Tools.ccroot.create_compiled_task`), and use the outputs for a new instance of :py:class:`waflib.Tools.ccroot.link_task`. 
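# Illustrative sketch: enabling the skip_stlib_link_deps optimization described
# above on a static-library target that has 'use' dependencies (all names are
# hypothetical).
def build(bld):
	bld(features='c cstlib skip_stlib_link_deps',
		source='a.c b.c',
		target='foo',
		use='bar')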
The class to use is the first link task matching a name from the attribute *features*, for example:: def build(bld): tg = bld(features='cxx cxxprogram cprogram', source='main.c', target='app') will create the task ``tg.link_task`` as a new instance of :py:class:`waflib.Tools.cxx.cxxprogram` """ for x in self.features: if x == 'cprogram' and 'cxx' in self.features: # limited compat x = 'cxxprogram' elif x == 'cshlib' and 'cxx' in self.features: x = 'cxxshlib' if x in Task.classes: if issubclass(Task.classes[x], link_task): link = x break else: return objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])] self.link_task = self.create_task(link, objs) self.link_task.add_target(self.target) # remember that the install paths are given by the task generators try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: # install a copy of the node list we have at this moment (implib not added) self.install_task = self.add_install_files( install_to=inst_to, install_from=self.link_task.outputs[:], chmod=self.link_task.chmod, task=self.link_task) @taskgen_method def use_rec(self, name, **kw): """ Processes the ``use`` keyword recursively. This method is kind of private and only meant to be used from ``process_use`` """ if name in self.tmp_use_not or name in self.tmp_use_seen: return try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) self.tmp_use_not.add(name) return self.tmp_use_seen.append(name) y.post() # bind temporary attributes on the task generator y.tmp_use_objects = objects = kw.get('objects', True) y.tmp_use_stlib = stlib = kw.get('stlib', True) try: link_task = y.link_task except AttributeError: y.tmp_use_var = '' else: objects = False if not isinstance(link_task, stlink_task): stlib = False y.tmp_use_var = 'LIB' else: y.tmp_use_var = 'STLIB' p = self.tmp_use_prec for x in self.to_list(getattr(y, 'use', [])): if self.env["STLIB_" + x]: continue try: p[x].append(name) except KeyError: p[x] = [name] self.use_rec(x, objects=objects, stlib=stlib) @feature('c', 'cxx', 'd', 'use', 'fc') @before_method('apply_incpaths', 'propagate_uselib_vars') @after_method('apply_link', 'process_source') def process_use(self): """ Process the ``use`` attribute which contains a list of task generator names:: def build(bld): bld.shlib(source='a.c', target='lib1') bld.program(source='main.c', target='app', use='lib1') See :py:func:`waflib.Tools.ccroot.use_rec`. 
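# Illustrative sketch: 'use' propagation as implemented by process_use below;
# export_includes and export_defines from 'util' are propagated to 'app'
# (target names are hypothetical).
def build(bld):
	bld.stlib(source='util.c', target='util',
		export_includes='include', export_defines=['WITH_UTIL=1'])
	bld.program(source='main.c', target='app', use='util')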
""" use_not = self.tmp_use_not = set() self.tmp_use_seen = [] # we would like an ordered set use_prec = self.tmp_use_prec = {} self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'use', [])) for x in names: self.use_rec(x) for x in use_not: if x in use_prec: del use_prec[x] # topological sort out = self.tmp_use_sorted = [] tmp = [] for x in self.tmp_use_seen: for k in use_prec.values(): if x in k: break else: tmp.append(x) while tmp: e = tmp.pop() out.append(e) try: nlst = use_prec[e] except KeyError: pass else: del use_prec[e] for x in nlst: for y in use_prec: if x in use_prec[y]: break else: tmp.append(x) if use_prec: raise Errors.WafError('Cycle detected in the use processing %r' % use_prec) out.reverse() link_task = getattr(self, 'link_task', None) for x in out: y = self.bld.get_tgen_by_name(x) var = y.tmp_use_var if var and link_task: if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task): # If the skip_stlib_link_deps feature is enabled then we should # avoid adding lib deps to the stlink_task instance. pass elif var == 'LIB' or y.tmp_use_stlib or x in names: self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]]) self.link_task.dep_nodes.extend(y.link_task.outputs) tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd()) self.env.append_unique(var + 'PATH', [tmp_path]) else: if y.tmp_use_objects: self.add_objects_from_tgen(y) if getattr(y, 'export_includes', None): # self.includes may come from a global variable #2035 self.includes = self.includes + y.to_incnodes(y.export_includes) if getattr(y, 'export_defines', None): self.env.append_value('DEFINES', self.to_list(y.export_defines)) # and finally, add the use variables (no recursion needed) for x in names: try: y = self.bld.get_tgen_by_name(x) except Errors.WafError: if not self.env['STLIB_' + x] and not x in self.uselib: self.uselib.append(x) else: for k in self.to_list(getattr(y, 'use', [])): if not self.env['STLIB_' + k] and not k in self.uselib: self.uselib.append(k) @taskgen_method def accept_node_to_link(self, node): """ PRIVATE INTERNAL USE ONLY """ return not node.name.endswith('.pdb') @taskgen_method def add_objects_from_tgen(self, tg): """ Add the objects from the depending compiled tasks as link task inputs. Some objects are filtered: for instance, .pdb files are added to the compiled tasks but not to the link tasks (to avoid errors) PRIVATE INTERNAL USE ONLY """ try: link_task = self.link_task except AttributeError: pass else: for tsk in getattr(tg, 'compiled_tasks', []): for x in tsk.outputs: if self.accept_node_to_link(x): link_task.inputs.append(x) @taskgen_method def get_uselib_vars(self): """ :return: the *uselib* variables associated to the *features* attribute (see :py:attr:`waflib.Tools.ccroot.USELIB_VARS`) :rtype: list of string """ _vars = set() for x in self.features: if x in USELIB_VARS: _vars |= USELIB_VARS[x] return _vars @feature('c', 'cxx', 'd', 'fc', 'javac', 'cs', 'uselib', 'asm') @after_method('process_use') def propagate_uselib_vars(self): """ Process uselib variables for adding flags. 
For example, the following target:: def build(bld): bld.env.AFLAGS_aaa = ['bar'] from waflib.Tools.ccroot import USELIB_VARS USELIB_VARS['aaa'] = ['AFLAGS'] tg = bld(features='aaa', aflags='test') The *aflags* attribute will be processed and this method will set:: tg.env.AFLAGS = ['bar', 'test'] """ _vars = self.get_uselib_vars() env = self.env app = env.append_value feature_uselib = self.features + self.to_list(getattr(self, 'uselib', [])) for var in _vars: y = var.lower() val = getattr(self, y, []) if val: app(var, self.to_list(val)) for x in feature_uselib: val = env['%s_%s' % (var, x)] if val: app(var, val) # ============ the code above must not know anything about import libs ========== @feature('cshlib', 'cxxshlib', 'fcshlib') @after_method('apply_link') def apply_implib(self): """ Handle dlls and their import libs on Windows-like systems. A ``.dll.a`` file called *import library* is generated. It must be installed as it is required for linking the library. """ if not self.env.DEST_BINFMT == 'pe': return dll = self.link_task.outputs[0] if isinstance(self.target, Node.Node): name = self.target.name else: name = os.path.split(self.target)[1] implib = self.env.implib_PATTERN % name implib = dll.parent.find_or_declare(implib) self.env.append_value('LINKFLAGS', self.env.IMPLIB_ST % implib.bldpath()) self.link_task.outputs.append(implib) if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe': node = self.path.find_resource(self.defs) if not node: raise Errors.WafError('invalid def file %r' % self.defs) if self.env.def_PATTERN: self.env.append_value('LINKFLAGS', self.env.def_PATTERN % node.path_from(self.get_cwd())) self.link_task.dep_nodes.append(node) else: # gcc for windows takes *.def file as input without any special flag self.link_task.inputs.append(node) # where to put the import library if getattr(self, 'install_task', None): try: # user has given a specific installation path for the import library inst_to = self.install_path_implib except AttributeError: try: # user has given an installation path for the main library, put the import library in it inst_to = self.install_path except AttributeError: # else, put the library in BINDIR and the import library in LIBDIR inst_to = '${IMPLIBDIR}' self.install_task.install_to = '${BINDIR}' if not self.env.IMPLIBDIR: self.env.IMPLIBDIR = self.env.LIBDIR self.implib_install_task = self.add_install_files(install_to=inst_to, install_from=implib, chmod=self.link_task.chmod, task=self.link_task) # ============ the code above must not know anything about vnum processing on unix platforms ========= re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$') @feature('cshlib', 'cxxshlib', 'dshlib', 'fcshlib', 'vnum') @after_method('apply_link', 'propagate_uselib_vars') def apply_vnum(self): """ Enforce version numbering on shared libraries. The valid version numbers must have either zero or two dots:: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16') In this example on Linux platform, ``libfoo.so`` is installed as ``libfoo.so.14.15.16``, and the following symbolic links are created: * ``libfoo.so → libfoo.so.14.15.16`` * ``libfoo.so.14 → libfoo.so.14.15.16`` By default, the library will be assigned SONAME ``libfoo.so.14``, effectively declaring ABI compatibility between all minor and patch releases for the major version of the library. 
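# Illustrative sketch: a Windows DLL whose import library (handled by
# apply_implib above) is installed separately from the DLL itself (the .def
# file name and the install paths are hypothetical).
def build(bld):
	bld.shlib(
		source='a.c',
		target='foo',
		defs='foo.def',
		install_path='${BINDIR}',
		install_path_implib='${LIBDIR}')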
When necessary, the compatibility can be explicitly defined using `cnum` parameter: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16', cnum='14.15') In this case, the assigned SONAME will be ``libfoo.so.14.15`` with ABI compatibility only between path releases for a specific major and minor version of the library. On OS X platform, install-name parameter will follow the above logic for SONAME with exception that it also specifies an absolute path (based on install_path) of the library. """ if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'): return link = self.link_task if not re_vnum.match(self.vnum): raise Errors.WafError('Invalid vnum %r for target %r' % (self.vnum, getattr(self, 'name', self))) nums = self.vnum.split('.') node = link.outputs[0] cnum = getattr(self, 'cnum', str(nums[0])) cnums = cnum.split('.') if len(cnums)>len(nums) or nums[0:len(cnums)] != cnums: raise Errors.WafError('invalid compatibility version %s' % cnum) libname = node.name if libname.endswith('.dylib'): name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum) name2 = libname.replace('.dylib', '.%s.dylib' % cnum) else: name3 = libname + '.' + self.vnum name2 = libname + '.' + cnum # add the so name for the ld linker - to disable, just unset env.SONAME_ST if self.env.SONAME_ST: v = self.env.SONAME_ST % name2 self.env.append_value('LINKFLAGS', v.split()) # the following task is just to enable execution from the build dir :-/ if self.env.DEST_OS != 'openbsd': outs = [node.parent.make_node(name3)] if name2 != name3: outs.append(node.parent.make_node(name2)) self.create_task('vnum', node, outs) if getattr(self, 'install_task', None): self.install_task.hasrun = Task.SKIPPED self.install_task.no_errcheck_out = True path = self.install_task.install_to if self.env.DEST_OS == 'openbsd': libname = self.link_task.outputs[0].name t1 = self.add_install_as(install_to='%s/%s' % (path, libname), install_from=node, chmod=self.link_task.chmod) self.vnum_install_task = (t1,) else: t1 = self.add_install_as(install_to=path + os.sep + name3, install_from=node, chmod=self.link_task.chmod) t3 = self.add_symlink_as(install_to=path + os.sep + libname, install_from=name3) if name2 != name3: t2 = self.add_symlink_as(install_to=path + os.sep + name2, install_from=name3) self.vnum_install_task = (t1, t2, t3) else: self.vnum_install_task = (t1, t3) if '-dynamiclib' in self.env.LINKFLAGS: # this requires after(propagate_uselib_vars) try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: p = Utils.subst_vars(inst_to, self.env) path = os.path.join(p, name2) self.env.append_value('LINKFLAGS', ['-install_name', path]) self.env.append_value('LINKFLAGS', '-Wl,-compatibility_version,%s' % cnum) self.env.append_value('LINKFLAGS', '-Wl,-current_version,%s' % self.vnum) class vnum(Task.Task): """ Create the symbolic links for a versioned shared library. 
Instances are created by :py:func:`waflib.Tools.ccroot.apply_vnum` """ color = 'CYAN' ext_in = ['.bin'] def keyword(self): return 'Symlinking' def run(self): for x in self.outputs: path = x.abspath() try: os.remove(path) except OSError: pass try: os.symlink(self.inputs[0].name, path) except OSError: return 1 class fake_shlib(link_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME class fake_stlib(stlink_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME @conf def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes:: def build(bld): bld.read_shlib('m') bld.program(source='main.c', use='m') """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) @conf def read_stlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system static library, enabling a use as a local library. Will trigger a rebuild if the file changes. """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib', export_includes=export_includes, export_defines=export_defines) lib_patterns = { 'shlib' : ['lib%s.so', '%s.so', 'lib%s.dylib', 'lib%s.dll', '%s.dll'], 'stlib' : ['lib%s.a', '%s.a', 'lib%s.dll', '%s.dll', 'lib%s.lib', '%s.lib'], } @feature('fake_lib') def process_lib(self): """ Find the location of a foreign library. Used by :py:class:`waflib.Tools.ccroot.read_shlib` and :py:class:`waflib.Tools.ccroot.read_stlib`. """ node = None names = [x % self.name for x in lib_patterns[self.lib_type]] for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS: if not isinstance(x, Node.Node): x = self.bld.root.find_node(x) or self.path.find_node(x) if not x: continue for y in names: node = x.find_node(y) if node: try: Utils.h_file(node.abspath()) except EnvironmentError: raise ValueError('Could not read %r' % y) break else: continue break else: raise Errors.WafError('could not find library %r' % self.name) self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node]) self.target = self.name class fake_o(Task.Task): def runnable_status(self): return Task.SKIP_ME @extension('.o', '.obj') def add_those_o_files(self, node): tsk = self.create_task('fake_o', [], node) try: self.compiled_tasks.append(tsk) except AttributeError: self.compiled_tasks = [tsk] @feature('fake_obj') @before_method('process_source') def process_objs(self): """ Puts object files in the task generator outputs """ for node in self.to_nodes(self.source): self.add_those_o_files(node) self.source = [] @conf def read_object(self, obj): """ Read an object file, enabling injection in libs/programs. Will trigger a rebuild if the file changes. 
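# Illustrative sketch: pre-built artifacts pulled into the dependency graph via
# the fake_lib/fake_obj features above (library name, search paths and the
# object file are hypothetical).
def build(bld):
	bld.read_stlib('foo', paths=['/opt/foo/lib'], export_includes=['/opt/foo/include'])
	bld.read_object('prebuilt/extra.o')
	bld.program(source='main.c', target='app', use='foo extra.o')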
:param obj: object file path, as string or Node """ if not isinstance(obj, self.path.__class__): obj = self.path.find_resource(obj) return self(features='fake_obj', source=obj, name=obj.name) @feature('cxxprogram', 'cprogram') @after_method('apply_link', 'process_use') def set_full_paths_hpux(self): """ On hp-ux, extend the libpaths and static library paths to absolute paths """ if self.env.DEST_OS != 'hp-ux': return base = self.bld.bldnode.abspath() for var in ['LIBPATH', 'STLIBPATH']: lst = [] for x in self.env[var]: if x.startswith('/'): lst.append(x) else: lst.append(os.path.normpath(os.path.join(base, x))) self.env[var] = lst ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/clang.py0000660000000000000000000000115700000000000022147 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Krzysztof Kosiński 2014 """ Detect the Clang C compiler """ from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf @conf def find_clang(conf): """ Finds the program clang and executes it to ensure it really is clang """ cc = conf.find_program('clang', var='CC') conf.get_cc_version(cc, clang=True) conf.env.CC_NAME = 'clang' def configure(conf): conf.find_clang() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/clangxx.py0000660000000000000000000000121000000000000022515 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) """ Detect the Clang++ C++ compiler """ from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf @conf def find_clangxx(conf): """ Finds the program clang++, and executes it to ensure it really is clang++ """ cxx = conf.find_program('clang++', var='CXX') conf.get_cc_version(cxx, clang=True) conf.env.CXX_NAME = 'clang' def configure(conf): conf.find_clangxx() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/compiler_c.py0000660000000000000000000000612600000000000023200 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat) """ Try to detect a C compiler from the list of supported compilers (gcc, msvc, etc):: def options(opt): opt.load('compiler_c') def configure(cnf): cnf.load('compiler_c') def build(bld): bld.program(source='main.c', target='app') The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_c.c_compiler`. To register a new C compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: from waflib.Tools.compiler_c import c_compiler c_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] def options(opt): opt.load('compiler_c') def configure(cnf): cnf.load('compiler_c') def build(bld): bld.program(source='main.c', target='app') Not all compilers need to have a specific tool. 
For example, the clang compilers can be detected by the gcc tools when using:: $ CC=clang waf configure """ import re from waflib.Tools import ccroot from waflib import Utils from waflib.Logs import debug c_compiler = { 'win32': ['msvc', 'gcc', 'clang'], 'cygwin': ['gcc', 'clang'], 'darwin': ['clang', 'gcc'], 'aix': ['xlc', 'gcc', 'clang'], 'linux': ['gcc', 'clang', 'icc'], 'sunos': ['suncc', 'gcc'], 'irix': ['gcc', 'irixcc'], 'hpux': ['gcc'], 'osf1V': ['gcc'], 'gnu': ['gcc', 'clang'], 'java': ['gcc', 'msvc', 'clang', 'icc'], 'default':['clang', 'gcc'], } """ Dict mapping platform names to Waf tools finding specific C compilers:: from waflib.Tools.compiler_c import c_compiler c_compiler['linux'] = ['gcc', 'icc', 'suncc'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = c_compiler.get(build_platform, c_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable C compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_c_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_c')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (C compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) debug('compiler_c: %r', e) else: if conf.env.CC: conf.end_msg(conf.env.get_flat('CC')) conf.env.COMPILER_CC = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a C compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-c-compiler=gcc """ test_for_compiler = default_compilers() opt.load_special_tools('c_*.py', ban=['c_dumbpreproc.py']) cc_compiler_opts = opt.add_option_group('Configuration options') cc_compiler_opts.add_option('--check-c-compiler', default=None, help='list of C compilers to try [%s]' % test_for_compiler, dest="check_c_compiler") for x in test_for_compiler.split(): opt.load('%s' % x) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/compiler_cxx.py0000660000000000000000000000621700000000000023561 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat) """ Try to detect a C++ compiler from the list of supported compilers (g++, msvc, etc):: def options(opt): opt.load('compiler_cxx') def configure(cnf): cnf.load('compiler_cxx') def build(bld): bld.program(source='main.cpp', target='app') The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_cxx.cxx_compiler`. To register a new C++ compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] def options(opt): opt.load('compiler_cxx') def configure(cnf): cnf.load('compiler_cxx') def build(bld): bld.program(source='main.c', target='app') Not all compilers need to have a specific tool. 
For example, the clang compilers can be detected by the gcc tools when using:: $ CXX=clang waf configure """ import re from waflib.Tools import ccroot from waflib import Utils from waflib.Logs import debug cxx_compiler = { 'win32': ['msvc', 'g++', 'clang++'], 'cygwin': ['g++', 'clang++'], 'darwin': ['clang++', 'g++'], 'aix': ['xlc++', 'g++', 'clang++'], 'linux': ['g++', 'clang++', 'icpc'], 'sunos': ['sunc++', 'g++'], 'irix': ['g++'], 'hpux': ['g++'], 'osf1V': ['g++'], 'gnu': ['g++', 'clang++'], 'java': ['g++', 'msvc', 'clang++', 'icpc'], 'default': ['clang++', 'g++'] } """ Dict mapping the platform names to Waf tools finding specific C++ compilers:: from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['linux'] = ['gxx', 'icpc', 'suncxx'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = cxx_compiler.get(build_platform, cxx_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable C++ compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_cxx_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_cxx')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (C++ compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) debug('compiler_cxx: %r', e) else: if conf.env.CXX: conf.end_msg(conf.env.get_flat('CXX')) conf.env.COMPILER_CXX = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a C++ compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-cxx-compiler=gxx """ test_for_compiler = default_compilers() opt.load_special_tools('cxx_*.py') cxx_compiler_opts = opt.add_option_group('Configuration options') cxx_compiler_opts.add_option('--check-cxx-compiler', default=None, help='list of C++ compilers to try [%s]' % test_for_compiler, dest="check_cxx_compiler") for x in test_for_compiler.split(): opt.load('%s' % x) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/compiler_d.py0000660000000000000000000000433100000000000023175 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2016-2018 (ita) """ Try to detect a D compiler from the list of supported compilers:: def options(opt): opt.load('compiler_d') def configure(cnf): cnf.load('compiler_d') def build(bld): bld.program(source='main.d', target='app') Only three D compilers are really present at the moment: * gdc * dmd, the ldc compiler having a very similar command-line interface * ldc2 """ import re from waflib import Utils, Logs d_compiler = { 'default' : ['gdc', 'dmd', 'ldc2'] } """ Dict mapping the platform names to lists of names of D compilers to try, in order of preference:: from waflib.Tools.compiler_d import d_compiler d_compiler['default'] = ['gdc', 'dmd', 'ldc2'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = d_compiler.get(build_platform, d_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable D compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no 
suitable compiler is found """ try: test_for_compiler = conf.options.check_d_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_d')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (D compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) Logs.debug('compiler_d: %r', e) else: if conf.env.D: conf.end_msg(conf.env.get_flat('D')) conf.env.COMPILER_D = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a D compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-d-compiler=dmd """ test_for_compiler = default_compilers() d_compiler_opts = opt.add_option_group('Configuration options') d_compiler_opts.add_option('--check-d-compiler', default=None, help='list of D compilers to try [%s]' % test_for_compiler, dest='check_d_compiler') for x in test_for_compiler.split(): opt.load('%s' % x) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/compiler_fc.py0000660000000000000000000000416200000000000023344 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 import re from waflib import Utils, Logs from waflib.Tools import fc fc_compiler = { 'win32' : ['gfortran','ifort'], 'darwin' : ['gfortran', 'g95', 'ifort'], 'linux' : ['gfortran', 'g95', 'ifort'], 'java' : ['gfortran', 'g95', 'ifort'], 'default': ['gfortran'], 'aix' : ['gfortran'] } """ Dict mapping the platform names to lists of names of Fortran compilers to try, in order of preference:: from waflib.Tools.compiler_c import c_compiler c_compiler['linux'] = ['gfortran', 'g95', 'ifort'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = fc_compiler.get(build_platform, fc_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable Fortran compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_fortran_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_fc')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (Fortran compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) Logs.debug('compiler_fortran: %r', e) else: if conf.env.FC: conf.end_msg(conf.env.get_flat('FC')) conf.env.COMPILER_FORTRAN = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a Fortran compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-fortran-compiler=ifort """ test_for_compiler = default_compilers() opt.load_special_tools('fc_*.py') fortran_compiler_opts = opt.add_option_group('Configuration options') fortran_compiler_opts.add_option('--check-fortran-compiler', default=None, help='list of Fortran compiler to try [%s]' % test_for_compiler, dest="check_fortran_compiler") for x in test_for_compiler.split(): opt.load('%s' % x) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 
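# Illustrative sketch, not from the waf sources above: unlike compiler_c,
# compiler_cxx and compiler_d, the compiler_fc tool just shown carries no
# usage example in a module docstring. The minimal project wscript below is
# an assumption-based sketch (main.f90 and app are placeholder names):
def options(opt):
	opt.load('compiler_fc')

def configure(cnf):
	cnf.load('compiler_fc')  # tries the fc_compiler entries for this platform and sets cnf.env.FC

def build(bld):
	# explicit features; the fc tool (later in this archive) maps .f90 files to fc tasks
	bld(features='fc fcprogram', source='main.f90', target='app')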
tevent-0.11.0/third_party/waf/waflib/Tools/cs.py0000660000000000000000000001437500000000000021476 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ C# support. A simple example:: def configure(conf): conf.load('cs') def build(bld): bld(features='cs', source='main.cs', gen='foo') Note that the configuration may compile C# snippets:: FRAG = ''' namespace Moo { public class Test { public static int Main(string[] args) { return 0; } } }''' def configure(conf): conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe', bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support') """ from waflib import Utils, Task, Options, Errors from waflib.TaskGen import before_method, after_method, feature from waflib.Tools import ccroot from waflib.Configure import conf ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) ccroot.lib_patterns['csshlib'] = ['%s'] @feature('cs') @before_method('process_source') def apply_cs(self): """ Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator. """ cs_nodes = [] no_nodes = [] for x in self.to_nodes(self.source): if x.name.endswith('.cs'): cs_nodes.append(x) else: no_nodes.append(x) self.source = no_nodes bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe') self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen)) tsk.env.CSTYPE = '/target:%s' % bintype tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu')) inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') if inst_to: # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) @feature('cs') @after_method('apply_cs') def use_cs(self): """ C# applications honor the **use** keyword:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib') bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi') """ names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: y = get(x) except Errors.WafError: self.env.append_value('CSFLAGS', '/reference:%s' % x) continue y.post() tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None) if not tsk: self.bld.fatal('cs task has no link task for use %r' % self) self.cs_task.dep_nodes.extend(tsk.outputs) # dependency self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs) self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath()) @feature('cs') @after_method('apply_cs', 'use_cs') def debug_cs(self): """ The C# targets may create .mdb or .pdb files:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdebug='full') # csdebug is a value in (True, 'full', 'pdbonly') """ csdebug = getattr(self, 'csdebug', self.env.CSDEBUG) if not csdebug: return node = self.cs_task.outputs[0] if self.env.CS_NAME == 'mono': out = node.parent.find_or_declare(node.name + '.mdb') else: out = node.change_ext('.pdb') self.cs_task.outputs.append(out) if getattr(self, 'install_task', None): self.pdb_install_task = 
self.add_install_files( install_to=self.install_task.install_to, install_from=out) if csdebug == 'pdbonly': val = ['/debug+', '/debug:pdbonly'] elif csdebug == 'full': val = ['/debug+', '/debug:full'] else: val = ['/debug-'] self.env.append_value('CSFLAGS', val) @feature('cs') @after_method('debug_cs') def doc_cs(self): """ The C# targets may create .xml documentation files:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True) # csdoc is a boolean value """ csdoc = getattr(self, 'csdoc', self.env.CSDOC) if not csdoc: return node = self.cs_task.outputs[0] out = node.change_ext('.xml') self.cs_task.outputs.append(out) if getattr(self, 'install_task', None): self.doc_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=out) self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath()) class mcs(Task.Task): """ Compile C# files """ color = 'YELLOW' run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' def split_argfile(self, cmd): inline = [cmd[0]] infile = [] for x in cmd[1:]: # csc doesn't want /noconfig in @file if x.lower() == '/noconfig': inline.append(x) else: infile.append(self.quote_flag(x)) return (inline, infile) def configure(conf): """ Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc) """ csc = getattr(Options.options, 'cscbinary', None) if csc: conf.env.MCS = csc conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS') conf.env.ASS_ST = '/r:%s' conf.env.RES_ST = '/resource:%s' conf.env.CS_NAME = 'csc' if str(conf.env.MCS).lower().find('mcs') > -1: conf.env.CS_NAME = 'mono' def options(opt): """ Add a command-line option for the configuration:: $ waf configure --with-csc-binary=/foo/bar/mcs """ opt.add_option('--with-csc-binary', type='string', dest='cscbinary') class fake_csshlib(Task.Task): """ Task used for reading a foreign .net assembly and adding the dependency on it """ color = 'YELLOW' inst_to = None def runnable_status(self): return Task.SKIP_ME @conf def read_csshlib(self, name, paths=[]): """ Read a foreign .net assembly for the *use* system:: def build(bld): bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath]) bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll') :param name: Name of the library :type name: string :param paths: Folders in which the library may be found :type paths: list of string :return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib` :rtype: :py:class:`waflib.TaskGen.task_gen` """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/cxx.py0000660000000000000000000000312600000000000021663 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) "Base for c++ programs and libraries" from waflib import TaskGen, Task from waflib.Tools import c_preproc from waflib.Tools.ccroot import link_task, stlink_task @TaskGen.extension('.cpp','.cc','.cxx','.C','.c++') def cxx_hook(self, node): "Binds c++ file extensions to create :py:class:`waflib.Tools.cxx.cxx` instances" return self.create_compiled_task('cxx', node) if not '.c' in TaskGen.task_gen.mappings: TaskGen.task_gen.mappings['.c'] = TaskGen.task_gen.mappings['.cpp'] class cxx(Task.Task): "Compiles C++ files into object 
files" run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' vars = ['CXXDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = c_preproc.scan class cxxprogram(link_task): "Links object files into c++ programs" run_str = '${LINK_CXX} ${LINKFLAGS} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' vars = ['LINKDEPS'] ext_out = ['.bin'] inst_to = '${BINDIR}' class cxxshlib(cxxprogram): "Links object files into c++ shared libraries" inst_to = '${LIBDIR}' class cxxstlib(stlink_task): "Links object files into c++ static libraries" pass # do not remove ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/d.py0000660000000000000000000000570600000000000021312 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2007-2018 (ita) from waflib import Utils, Task, Errors from waflib.TaskGen import taskgen_method, feature, extension from waflib.Tools import d_scan, d_config from waflib.Tools.ccroot import link_task, stlink_task class d(Task.Task): "Compile a d file into an object file" color = 'GREEN' run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}' scan = d_scan.scan class d_with_header(d): "Compile a d file and generate a header" run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}' class d_header(Task.Task): "Compile d headers" color = 'BLUE' run_str = '${D} ${D_HEADER} ${SRC}' class dprogram(link_task): "Link object files into a d program" run_str = '${D_LINKER} ${LINKFLAGS} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${DLIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB}' inst_to = '${BINDIR}' class dshlib(dprogram): "Link object files into a d shared library" inst_to = '${LIBDIR}' class dstlib(stlink_task): "Link object files into a d static library" pass # do not remove @extension('.d', '.di', '.D') def d_hook(self, node): """ Compile *D* files. 
To get .di files as well as .o files, set the following:: def build(bld): bld.program(source='foo.d', target='app', generate_headers=True) """ ext = Utils.destos_to_binfmt(self.env.DEST_OS) == 'pe' and 'obj' or 'o' out = '%s.%d.%s' % (node.name, self.idx, ext) def create_compiled_task(self, name, node): task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task if getattr(self, 'generate_headers', None): tsk = create_compiled_task(self, 'd_with_header', node) tsk.outputs.append(node.change_ext(self.env.DHEADER_ext)) else: tsk = create_compiled_task(self, 'd', node) return tsk @taskgen_method def generate_header(self, filename): """ See feature request #104:: def build(bld): tg = bld.program(source='foo.d', target='app') tg.generate_header('blah.d') # is equivalent to: #tg = bld.program(source='foo.d', target='app', header_lst='blah.d') :param filename: header to create :type filename: string """ try: self.header_lst.append([filename, self.install_path]) except AttributeError: self.header_lst = [[filename, self.install_path]] @feature('d') def process_header(self): """ Process the attribute 'header_lst' to create the d header compilation tasks:: def build(bld): bld.program(source='foo.d', target='app', header_lst='blah.d') """ for i in getattr(self, 'header_lst', []): node = self.path.find_resource(i[0]) if not node: raise Errors.WafError('file %r not found on d obj' % i[0]) self.create_task('d_header', node, node.change_ext('.di')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/d_config.py0000660000000000000000000000260700000000000022634 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) from waflib import Utils from waflib.Configure import conf @conf def d_platform_flags(self): """ Sets the extensions dll/so for d programs and libraries """ v = self.env if not v.DEST_OS: v.DEST_OS = Utils.unversioned_sys_platform() binfmt = Utils.destos_to_binfmt(self.env.DEST_OS) if binfmt == 'pe': v.dprogram_PATTERN = '%s.exe' v.dshlib_PATTERN = 'lib%s.dll' v.dstlib_PATTERN = 'lib%s.a' elif binfmt == 'mac-o': v.dprogram_PATTERN = '%s' v.dshlib_PATTERN = 'lib%s.dylib' v.dstlib_PATTERN = 'lib%s.a' else: v.dprogram_PATTERN = '%s' v.dshlib_PATTERN = 'lib%s.so' v.dstlib_PATTERN = 'lib%s.a' DLIB = ''' version(D_Version2) { import std.stdio; int main() { writefln("phobos2"); return 0; } } else { version(Tango) { import tango.stdc.stdio; int main() { printf("tango"); return 0; } } else { import std.stdio; int main() { writefln("phobos1"); return 0; } } } ''' """Detection string for the D standard library""" @conf def check_dlibrary(self, execute=True): """ Detects the kind of standard library that comes with the compiler, and sets conf.env.DLIBRARY to tango, phobos1 or phobos2 """ ret = self.check_cc(features='d dprogram', fragment=DLIB, compile_filename='test.d', execute=execute, define_ret=True) if execute: self.env.DLIBRARY = ret.strip() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/d_scan.py0000660000000000000000000001170000000000000022305 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) """ Provide a scanner for finding dependencies on d files """ import re from waflib import 
Utils def filter_comments(filename): """ :param filename: d file name :type filename: string :rtype: list :return: a list of characters """ txt = Utils.readf(filename) i = 0 buf = [] max = len(txt) begin = 0 while i < max: c = txt[i] if c == '"' or c == "'": # skip a string or character literal buf.append(txt[begin:i]) delim = c i += 1 while i < max: c = txt[i] if c == delim: break elif c == '\\': # skip the character following backslash i += 1 i += 1 i += 1 begin = i elif c == '/': # try to replace a comment with whitespace buf.append(txt[begin:i]) i += 1 if i == max: break c = txt[i] if c == '+': # eat nesting /+ +/ comment i += 1 nesting = 1 c = None while i < max: prev = c c = txt[i] if prev == '/' and c == '+': nesting += 1 c = None elif prev == '+' and c == '/': nesting -= 1 if nesting == 0: break c = None i += 1 elif c == '*': # eat /* */ comment i += 1 c = None while i < max: prev = c c = txt[i] if prev == '*' and c == '/': break i += 1 elif c == '/': # eat // comment i += 1 while i < max and txt[i] != '\n': i += 1 else: # no comment begin = i - 1 continue i += 1 begin = i buf.append(' ') else: i += 1 buf.append(txt[begin:]) return buf class d_parser(object): """ Parser for d files """ def __init__(self, env, incpaths): #self.code = '' #self.module = '' #self.imports = [] self.allnames = [] self.re_module = re.compile(r"module\s+([^;]+)") self.re_import = re.compile(r"import\s+([^;]+)") self.re_import_bindings = re.compile("([^:]+):(.*)") self.re_import_alias = re.compile("[^=]+=(.+)") self.env = env self.nodes = [] self.names = [] self.incpaths = incpaths def tryfind(self, filename): """ Search file a file matching an module/import directive :param filename: file to read :type filename: string """ found = 0 for n in self.incpaths: found = n.find_resource(filename.replace('.', '/') + '.d') if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename) def get_strings(self, code): """ :param code: d code to parse :type code: string :return: the modules that the code uses :rtype: a list of match objects """ #self.imports = [] self.module = '' lst = [] # get the module name (if present) mod_name = self.re_module.search(code) if mod_name: self.module = re.sub(r'\s+', '', mod_name.group(1)) # strip all whitespaces # go through the code, have a look at all import occurrences # first, lets look at anything beginning with "import" and ending with ";" import_iterator = self.re_import.finditer(code) if import_iterator: for import_match in import_iterator: import_match_str = re.sub(r'\s+', '', import_match.group(1)) # strip all whitespaces # does this end with an import bindings declaration? # (import bindings always terminate the list of imports) bindings_match = self.re_import_bindings.match(import_match_str) if bindings_match: import_match_str = bindings_match.group(1) # if so, extract the part before the ":" (since the module declaration(s) is/are located there) # split the matching string into a bunch of strings, separated by a comma matches = import_match_str.split(',') for match in matches: alias_match = self.re_import_alias.match(match) if alias_match: # is this an alias declaration? 
(alias = module name) if so, extract the module name match = alias_match.group(1) lst.append(match) return lst def start(self, node): """ The parsing starts here :param node: input file :type node: :py:class:`waflib.Node.Node` """ self.waiting = [node] # while the stack is not empty, add the dependencies while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): """ Find all the modules that a file depends on, uses :py:meth:`waflib.Tools.d_scan.d_parser.tryfind` to process dependent files :param node: input file :type node: :py:class:`waflib.Node.Node` """ path = node.abspath() # obtain the absolute path code = "".join(filter_comments(path)) # read the file and filter the comments names = self.get_strings(code) # obtain the import strings for x in names: # optimization if x in self.allnames: continue self.allnames.append(x) # for each name, see if it is like a node or not self.tryfind(x) def scan(self): "look for .d/.di used by a d file" env = self.env gruik = d_parser(env, self.generator.includes_nodes) node = self.inputs[0] gruik.start(node) nodes = gruik.nodes names = gruik.names return (nodes, names) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/dbus.py0000660000000000000000000000401600000000000022015 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 """ Compiles dbus files with **dbus-binding-tool** Typical usage:: def options(opt): opt.load('compiler_c dbus') def configure(conf): conf.load('compiler_c dbus') def build(bld): tg = bld.program( includes = '.', source = bld.path.ant_glob('*.c'), target = 'gnome-hello') tg.add_dbus_file('test.xml', 'test_prefix', 'glib-server') """ from waflib import Task, Errors from waflib.TaskGen import taskgen_method, before_method @taskgen_method def add_dbus_file(self, filename, prefix, mode): """ Adds a dbus file to the list of dbus files to process. Store them in the attribute *dbus_lst*. :param filename: xml file to compile :type filename: string :param prefix: dbus binding tool prefix (--prefix=prefix) :type prefix: string :param mode: dbus binding tool mode (--mode=mode) :type mode: string """ if not hasattr(self, 'dbus_lst'): self.dbus_lst = [] if not 'process_dbus' in self.meths: self.meths.append('process_dbus') self.dbus_lst.append([filename, prefix, mode]) @before_method('process_source') def process_dbus(self): """ Processes the dbus files stored in the attribute *dbus_lst* to create :py:class:`waflib.Tools.dbus.dbus_binding_tool` instances. 
""" for filename, prefix, mode in getattr(self, 'dbus_lst', []): node = self.path.find_resource(filename) if not node: raise Errors.WafError('file not found ' + filename) tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h')) tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix tsk.env.DBUS_BINDING_TOOL_MODE = mode class dbus_binding_tool(Task.Task): """ Compiles a dbus file """ color = 'BLUE' ext_out = ['.h'] run_str = '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}' shell = True # temporary workaround for #795 def configure(conf): """ Detects the program dbus-binding-tool and sets ``conf.env.DBUS_BINDING_TOOL`` """ conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/dmd.py0000660000000000000000000000353000000000000021624 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2008-2018 (ita) import sys from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_dmd(conf): """ Finds the program *dmd*, *dmd2*, or *ldc* and set the variable *D* """ conf.find_program(['dmd', 'dmd2', 'ldc'], var='D') # make sure that we're dealing with dmd1, dmd2, or ldc(1) out = conf.cmd_and_log(conf.env.D + ['--help']) if out.find("D Compiler v") == -1: out = conf.cmd_and_log(conf.env.D + ['-version']) if out.find("based on DMD v1.") == -1: conf.fatal("detected compiler is not dmd/ldc") @conf def common_flags_ldc(conf): """ Sets the D flags required by *ldc* """ v = conf.env v.DFLAGS = ['-d-version=Posix'] v.LINKFLAGS = [] v.DFLAGS_dshlib = ['-relocation-model=pic'] @conf def common_flags_dmd(conf): """ Set the flags required by *dmd* or *dmd2* """ v = conf.env v.D_SRC_F = ['-c'] v.D_TGT_F = '-of%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-of%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' v.LINKFLAGS_dprogram= ['-quiet'] v.DFLAGS_dshlib = ['-fPIC'] v.LINKFLAGS_dshlib = ['-L-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = ['-H', '-Hf'] v.D_HDR_F = '%s' def configure(conf): """ Configuration for *dmd*, *dmd2*, and *ldc* """ conf.find_dmd() if sys.platform == 'win32': out = conf.cmd_and_log(conf.env.D + ['--help']) if out.find('D Compiler v2.') > -1: conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead') conf.load('ar') conf.load('d') conf.common_flags_dmd() conf.d_platform_flags() if str(conf.env.D).find('ldc') > -1: conf.common_flags_ldc() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/errcheck.py0000660000000000000000000001722200000000000022651 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Common mistakes highlighting. 
There is a performance impact, so this tool is only loaded when running ``waf -v`` """ typos = { 'feature':'features', 'sources':'source', 'targets':'target', 'include':'includes', 'export_include':'export_includes', 'define':'defines', 'importpath':'includes', 'installpath':'install_path', 'iscopy':'is_copy', 'uses':'use', } meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects'] import sys from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils from waflib.Tools import ccroot def check_same_targets(self): mp = Utils.defaultdict(list) uids = {} def check_task(tsk): if not isinstance(tsk, Task.Task): return if hasattr(tsk, 'no_errcheck_out'): return for node in tsk.outputs: mp[node].append(tsk) try: uids[tsk.uid()].append(tsk) except KeyError: uids[tsk.uid()] = [tsk] for g in self.groups: for tg in g: try: for tsk in tg.tasks: check_task(tsk) except AttributeError: # raised if not a task generator, which should be uncommon check_task(tg) dupe = False for (k, v) in mp.items(): if len(v) > 1: dupe = True msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "") Logs.error(msg) for x in v: if Logs.verbose > 1: Logs.error(' %d. %r', 1 + v.index(x), x.generator) else: Logs.error(' %d. %r in %r', 1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None)) Logs.error('If you think that this is an error, set no_errcheck_out on the task instance') if not dupe: for (k, v) in uids.items(): if len(v) > 1: Logs.error('* Several tasks use the same identifier. Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid') tg_details = tsk.generator.name if Logs.verbose > 2: tg_details = tsk.generator for tsk in v: Logs.error(' - object %r (%r) defined in %r', tsk.__class__.__name__, tsk, tg_details) def check_invalid_constraints(self): feat = set() for x in list(TaskGen.feats.values()): feat.union(set(x)) for (x, y) in TaskGen.task_gen.prec.items(): feat.add(x) feat.union(set(y)) ext = set() for x in TaskGen.task_gen.mappings.values(): ext.add(x.__name__) invalid = ext & feat if invalid: Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method', list(invalid)) # the build scripts have been read, so we can check for invalid after/before attributes on task classes for cls in list(Task.classes.values()): if sys.hexversion > 0x3000000 and issubclass(cls, Task.Task) and isinstance(cls.hcode, str): raise Errors.WafError('Class %r has hcode value %r of type , expecting (use Utils.h_cmd() ?)' % (cls, cls.hcode)) for x in ('before', 'after'): for y in Utils.to_list(getattr(cls, x, [])): if not Task.classes.get(y): Logs.error('Erroneous order constraint %r=%r on task class %r', x, y, cls.__name__) if getattr(cls, 'rule', None): Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")', cls.__name__) def replace(m): """ Replaces existing BuildContext methods to verify parameter names, for example ``bld(source=)`` has no ending *s* """ oldcall = getattr(Build.BuildContext, m) def call(self, *k, **kw): ret = oldcall(self, *k, **kw) for x in typos: if x in kw: if x == 'iscopy' and 'subst' in getattr(self, 'features', ''): continue Logs.error('Fix the typo %r -> %r on %r', x, typos[x], ret) return ret setattr(Build.BuildContext, m, call) def enhance_lib(): """ Modifies existing classes and methods to enable error verification """ for m in meths_typos: replace(m) # catch '..' 
in ant_glob patterns def ant_glob(self, *k, **kw): if k: lst = Utils.to_list(k[0]) for pat in lst: sp = pat.split('/') if '..' in sp: Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'", k[0]) if '.' in sp: Logs.error("In ant_glob pattern %r: '.' means 'one dot', not 'current directory'", k[0]) return self.old_ant_glob(*k, **kw) Node.Node.old_ant_glob = Node.Node.ant_glob Node.Node.ant_glob = ant_glob # catch ant_glob on build folders def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): if remove: try: if self.is_child_of(self.ctx.bldnode) and not quiet: quiet = True Logs.error('Calling ant_glob on build folders (%r) is dangerous: add quiet=True / remove=False', self) except AttributeError: pass return self.old_ant_iter(accept, maxdepth, pats, dir, src, remove, quiet) Node.Node.old_ant_iter = Node.Node.ant_iter Node.Node.ant_iter = ant_iter # catch conflicting ext_in/ext_out/before/after declarations old = Task.is_before def is_before(t1, t2): ret = old(t1, t2) if ret and old(t2, t1): Logs.error('Contradictory order constraints in classes %r %r', t1, t2) return ret Task.is_before = is_before # check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose # so we only issue a warning def check_err_features(self): lst = self.to_list(self.features) if 'shlib' in lst: Logs.error('feature shlib -> cshlib, dshlib or cxxshlib') for x in ('c', 'cxx', 'd', 'fc'): if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]: Logs.error('%r features is probably missing %r', self, x) TaskGen.feature('*')(check_err_features) # check for erroneous order constraints def check_err_order(self): if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features): for x in ('before', 'after', 'ext_in', 'ext_out'): if hasattr(self, x): Logs.warn('Erroneous order constraint %r on non-rule based task generator %r', x, self) else: for x in ('before', 'after'): for y in self.to_list(getattr(self, x, [])): if not Task.classes.get(y): Logs.error('Erroneous order constraint %s=%r on %r (no such class)', x, y, self) TaskGen.feature('*')(check_err_order) # check for @extension used with @feature/@before_method/@after_method def check_compile(self): check_invalid_constraints(self) try: ret = self.orig_compile() finally: check_same_targets(self) return ret Build.BuildContext.orig_compile = Build.BuildContext.compile Build.BuildContext.compile = check_compile # check for invalid build groups #914 def use_rec(self, name, **kw): try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: pass else: idx = self.bld.get_group_idx(self) odx = self.bld.get_group_idx(y) if odx > idx: msg = "Invalid 'use' across build groups:" if Logs.verbose > 1: msg += '\n target %r\n uses:\n %r' % (self, y) else: msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name) raise Errors.WafError(msg) self.orig_use_rec(name, **kw) TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec TaskGen.task_gen.use_rec = use_rec # check for env.append def _getattr(self, name, default=None): if name == 'append' or name == 'add': raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique') elif name == 'prepend': raise Errors.WafError('env.prepend does not exist: use env.prepend_value') if name in self.__slots__: return super(ConfigSet.ConfigSet, self).__getattr__(name, default) else: return self[name] ConfigSet.ConfigSet.__getattr__ = _getattr def 
options(opt): """ Error verification can be enabled by default (not just on ``waf -v``) by adding to the user script options """ enhance_lib() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/fc.py0000660000000000000000000001515300000000000021454 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) """ Fortran support """ from waflib import Utils, Task, Errors from waflib.Tools import ccroot, fc_config, fc_scan from waflib.TaskGen import extension from waflib.Configure import conf ccroot.USELIB_VARS['fc'] = set(['FCFLAGS', 'DEFINES', 'INCLUDES', 'FCPPFLAGS']) ccroot.USELIB_VARS['fcprogram_test'] = ccroot.USELIB_VARS['fcprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'LDFLAGS']) ccroot.USELIB_VARS['fcshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'LDFLAGS']) ccroot.USELIB_VARS['fcstlib'] = set(['ARFLAGS', 'LINKDEPS']) @extension('.f','.F','.f90','.F90','.for','.FOR','.f95','.F95','.f03','.F03','.f08','.F08') def fc_hook(self, node): "Binds the Fortran file extensions create :py:class:`waflib.Tools.fc.fc` instances" return self.create_compiled_task('fc', node) @conf def modfile(conf, name): """ Turns a module name into the right module file name. Defaults to all lower case. """ if name.find(':') >= 0: # Depending on a submodule! separator = conf.env.FC_SUBMOD_SEPARATOR or '@' # Ancestors of the submodule will be prefixed to the # submodule name, separated by a colon. modpath = name.split(':') # Only the ancestor (actual) module and the submodule name # will be used for the filename. modname = modpath[0] + separator + modpath[-1] suffix = conf.env.FC_SUBMOD_SUFFIX or '.smod' else: modname = name suffix = '.mod' return {'lower' :modname.lower() + suffix.lower(), 'lower.MOD' :modname.lower() + suffix.upper(), 'UPPER.mod' :modname.upper() + suffix.lower(), 'UPPER' :modname.upper() + suffix.upper()}[conf.env.FC_MOD_CAPITALIZATION or 'lower'] def get_fortran_tasks(tsk): """ Obtains all fortran tasks from the same build group. 
Those tasks must not have the attribute 'nomod' or 'mod_fortran_done' :return: a list of :py:class:`waflib.Tools.fc.fc` instances """ bld = tsk.generator.bld tasks = bld.get_tasks_group(bld.get_group_idx(tsk.generator)) return [x for x in tasks if isinstance(x, fc) and not getattr(x, 'nomod', None) and not getattr(x, 'mod_fortran_done', None)] class fc(Task.Task): """ Fortran tasks can only run when all fortran tasks in a current task group are ready to be executed This may cause a deadlock if some fortran task is waiting for something that cannot happen (circular dependency) Should this ever happen, set the 'nomod=True' on those tasks instances to break the loop """ color = 'GREEN' run_str = '${FC} ${FCFLAGS} ${FCINCPATH_ST:INCPATHS} ${FCDEFINES_ST:DEFINES} ${_FCMODOUTFLAGS} ${FC_TGT_F}${TGT[0].abspath()} ${FC_SRC_F}${SRC[0].abspath()} ${FCPPFLAGS}' vars = ["FORTRANMODPATHFLAG"] def scan(self): """Fortran dependency scanner""" tmp = fc_scan.fortran_parser(self.generator.includes_nodes) tmp.task = self tmp.start(self.inputs[0]) return (tmp.nodes, tmp.names) def runnable_status(self): """ Sets the mod file outputs and the dependencies on the mod files over all Fortran tasks executed by the main thread so there are no concurrency issues """ if getattr(self, 'mod_fortran_done', None): return super(fc, self).runnable_status() # now, if we reach this part it is because this fortran task is the first in the list bld = self.generator.bld # obtain the fortran tasks lst = get_fortran_tasks(self) # disable this method for other tasks for tsk in lst: tsk.mod_fortran_done = True # wait for all the .f tasks to be ready for execution # and ensure that the scanners are called at least once for tsk in lst: ret = tsk.runnable_status() if ret == Task.ASK_LATER: # we have to wait for one of the other fortran tasks to be ready # this may deadlock if there are dependencies between fortran tasks # but this should not happen (we are setting them here!) 
for x in lst: x.mod_fortran_done = None return Task.ASK_LATER ins = Utils.defaultdict(set) outs = Utils.defaultdict(set) # the .mod files to create for tsk in lst: key = tsk.uid() for x in bld.raw_deps[key]: if x.startswith('MOD@'): name = bld.modfile(x.replace('MOD@', '')) node = bld.srcnode.find_or_declare(name) tsk.set_outputs(node) outs[node].add(tsk) # the .mod files to use for tsk in lst: key = tsk.uid() for x in bld.raw_deps[key]: if x.startswith('USE@'): name = bld.modfile(x.replace('USE@', '')) node = bld.srcnode.find_resource(name) if node and node not in tsk.outputs: if not node in bld.node_deps[key]: bld.node_deps[key].append(node) ins[node].add(tsk) # if the intersection matches, set the order for k in ins.keys(): for a in ins[k]: a.run_after.update(outs[k]) for x in outs[k]: self.generator.bld.producer.revdeps[x].add(a) # the scanner cannot output nodes, so we have to set them # ourselves as task.dep_nodes (additional input nodes) tmp = [] for t in outs[k]: tmp.extend(t.outputs) a.dep_nodes.extend(tmp) a.dep_nodes.sort(key=lambda x: x.abspath()) # the task objects have changed: clear the signature cache for tsk in lst: try: delattr(tsk, 'cache_sig') except AttributeError: pass return super(fc, self).runnable_status() class fcprogram(ccroot.link_task): """Links Fortran programs""" color = 'YELLOW' run_str = '${FC} ${LINKFLAGS} ${FCLNK_SRC_F}${SRC} ${FCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FCSTLIB_MARKER} ${FCSTLIBPATH_ST:STLIBPATH} ${FCSTLIB_ST:STLIB} ${FCSHLIB_MARKER} ${FCLIBPATH_ST:LIBPATH} ${FCLIB_ST:LIB} ${LDFLAGS}' inst_to = '${BINDIR}' class fcshlib(fcprogram): """Links Fortran libraries""" inst_to = '${LIBDIR}' class fcstlib(ccroot.stlink_task): """Links Fortran static libraries (uses ar by default)""" pass # do not remove the pass statement class fcprogram_test(fcprogram): """Custom link task to obtain compiler outputs for Fortran configuration tests""" def runnable_status(self): """This task is always executed""" ret = super(fcprogram_test, self).runnable_status() if ret == Task.SKIP_ME: ret = Task.RUN_ME return ret def exec_command(self, cmd, **kw): """Stores the compiler std our/err onto the build context, to bld.out + bld.err""" bld = self.generator.bld kw['shell'] = isinstance(cmd, str) kw['stdout'] = kw['stderr'] = Utils.subprocess.PIPE kw['cwd'] = self.get_cwd() bld.out = bld.err = '' bld.to_log('command: %s\n' % cmd) kw['output'] = 0 try: (bld.out, bld.err) = bld.cmd_and_log(cmd, **kw) except Errors.WafError: return -1 if bld.out: bld.to_log('out: %s\n' % bld.out) if bld.err: bld.to_log('err: %s\n' % bld.err) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/fc_config.py0000660000000000000000000003324200000000000023000 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) """ Fortran configuration helpers """ import re, os, sys, shlex from waflib.Configure import conf from waflib.TaskGen import feature, before_method FC_FRAGMENT = ' program main\n end program main\n' FC_FRAGMENT2 = ' PROGRAM MAIN\n END\n' # what's the actual difference between these? 
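# Illustrative sketch, not from the waf sources above: the two fragments just
# defined are the throwaway test programs used by the checks in this file --
# FC_FRAGMENT by check_fortran(), FC_FRAGMENT2 by check_fortran_verbose_flag()
# and check_fortran_clib(). A hypothetical configure function showing the
# usual order (the clib check needs FC_VERBOSE_FLAG to be set first):
def _example_fortran_configure(cnf):
	cnf.load('compiler_c compiler_fc')
	cnf.check_fortran()               # compiles FC_FRAGMENT with features 'fc fcprogram'
	cnf.check_fortran_verbose_flag()  # links FC_FRAGMENT2, sets cnf.env.FC_VERBOSE_FLAG
	cnf.check_fortran_clib()          # parses the verbose link output into cnf.env.LINKFLAGS_CLIB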
@conf def fc_flags(conf): """ Defines common fortran configuration flags and file extensions """ v = conf.env v.FC_SRC_F = [] v.FC_TGT_F = ['-c', '-o'] v.FCINCPATH_ST = '-I%s' v.FCDEFINES_ST = '-D%s' if not v.LINK_FC: v.LINK_FC = v.FC v.FCLNK_SRC_F = [] v.FCLNK_TGT_F = ['-o'] v.FCFLAGS_fcshlib = ['-fpic'] v.LINKFLAGS_fcshlib = ['-shared'] v.fcshlib_PATTERN = 'lib%s.so' v.fcstlib_PATTERN = 'lib%s.a' v.FCLIB_ST = '-l%s' v.FCLIBPATH_ST = '-L%s' v.FCSTLIB_ST = '-l%s' v.FCSTLIBPATH_ST = '-L%s' v.FCSTLIB_MARKER = '-Wl,-Bstatic' v.FCSHLIB_MARKER = '-Wl,-Bdynamic' v.SONAME_ST = '-Wl,-h,%s' @conf def fc_add_flags(conf): """ Adds FCFLAGS / LDFLAGS / LINKFLAGS from os.environ to conf.env """ conf.add_os_flags('FCPPFLAGS', dup=False) conf.add_os_flags('FCFLAGS', dup=False) conf.add_os_flags('LINKFLAGS', dup=False) conf.add_os_flags('LDFLAGS', dup=False) @conf def check_fortran(self, *k, **kw): """ Compiles a Fortran program to ensure that the settings are correct """ self.check_cc( fragment = FC_FRAGMENT, compile_filename = 'test.f', features = 'fc fcprogram', msg = 'Compiling a simple fortran app') @conf def check_fc(self, *k, **kw): """ Same as :py:func:`waflib.Tools.c_config.check` but defaults to the *Fortran* programming language (this overrides the C defaults in :py:func:`waflib.Tools.c_config.validate_c`) """ kw['compiler'] = 'fc' if not 'compile_mode' in kw: kw['compile_mode'] = 'fc' if not 'type' in kw: kw['type'] = 'fcprogram' if not 'compile_filename' in kw: kw['compile_filename'] = 'test.f90' if not 'code' in kw: kw['code'] = FC_FRAGMENT return self.check(*k, **kw) # ------------------------------------------------------------------------ # --- These are the default platform modifiers, refactored here for # convenience. gfortran and g95 have much overlap. # ------------------------------------------------------------------------ @conf def fortran_modifier_darwin(conf): """ Defines Fortran flags and extensions for OSX systems """ v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.LINKFLAGS_fcshlib = ['-dynamiclib'] v.fcshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.LINKFLAGS_fcstlib = [] v.FCSHLIB_MARKER = '' v.FCSTLIB_MARKER = '' v.SONAME_ST = '' @conf def fortran_modifier_win32(conf): """ Defines Fortran flags for Windows platforms """ v = conf.env v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' v.fcshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.FCFLAGS_fcshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def fortran_modifier_cygwin(conf): """ Defines Fortran flags for use on cygwin """ fortran_modifier_win32(conf) v = conf.env v.fcshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_fcshlib', ['-Wl,--enable-auto-image-base']) v.FCFLAGS_fcshlib = [] # ------------------------------------------------------------------------ @conf def check_fortran_dummy_main(self, *k, **kw): """ Determines if a main function is needed by compiling a code snippet with the C compiler and linking it with the Fortran compiler (useful on unix-like systems) """ if not self.env.CC: self.fatal('A c compiler is required for check_fortran_dummy_main') lst = ['MAIN__', '__MAIN', '_MAIN', 'MAIN_', 'MAIN'] lst.extend([m.lower() for m in lst]) lst.append('') self.start_msg('Detecting whether we need a dummy main') for main in lst: kw['fortran_main'] = main try: self.check_cc( fragment = 'int %s() { return 0; }\n' % (main or 'test'), features = 'c fcprogram', mandatory = True ) if not main: self.env.FC_MAIN = -1 self.end_msg('no') else: self.env.FC_MAIN = main self.end_msg('yes %s' % main) break except self.errors.ConfigurationError: pass else: self.end_msg('not found') self.fatal('could not detect whether fortran requires a dummy main, see the config.log') # ------------------------------------------------------------------------ GCC_DRIVER_LINE = re.compile('^Driving:') POSIX_STATIC_EXT = re.compile(r'\S+\.a') POSIX_LIB_FLAGS = re.compile(r'-l\S+') @conf def is_link_verbose(self, txt): """Returns True if 'useful' link options can be found in txt""" assert isinstance(txt, str) for line in txt.splitlines(): if not GCC_DRIVER_LINE.search(line): if POSIX_STATIC_EXT.search(line) or POSIX_LIB_FLAGS.search(line): return True return False @conf def check_fortran_verbose_flag(self, *k, **kw): """ Checks what kind of verbose (-v) flag works, then sets it to env.FC_VERBOSE_FLAG """ self.start_msg('fortran link verbose flag') for x in ('-v', '--verbose', '-verbose', '-V'): try: self.check_cc( features = 'fc fcprogram_test', fragment = FC_FRAGMENT2, compile_filename = 'test.f', linkflags = [x], mandatory=True) except self.errors.ConfigurationError: pass else: # output is on stderr or stdout (for xlf) if self.is_link_verbose(self.test_bld.err) or self.is_link_verbose(self.test_bld.out): self.end_msg(x) break else: self.end_msg('failure') self.fatal('Could not obtain the fortran link verbose flag (see config.log)') self.env.FC_VERBOSE_FLAG = x return x # ------------------------------------------------------------------------ # linkflags which match those are ignored LINKFLAGS_IGNORED = [r'-lang*', r'-lcrt[a-zA-Z0-9\.]*\.o', r'-lc$', r'-lSystem', r'-libmil', r'-LIST:*', r'-LNO:*'] if os.name == 'nt': LINKFLAGS_IGNORED.extend([r'-lfrt*', r'-luser32', r'-lkernel32', r'-ladvapi32', r'-lmsvcrt', r'-lshell32', r'-lmingw', r'-lmoldname']) else: LINKFLAGS_IGNORED.append(r'-lgcc*') RLINKFLAGS_IGNORED = [re.compile(f) for f in LINKFLAGS_IGNORED] def _match_ignore(line): """Returns True if the line should be ignored (Fortran verbose flag test)""" for i in RLINKFLAGS_IGNORED: if i.match(line): return True return False def parse_fortran_link(lines): """Given the output of verbose link of Fortran compiler, this returns a list of flags necessary for linking using the standard linker.""" final_flags = [] for line in lines: if not GCC_DRIVER_LINE.match(line): _parse_flink_line(line, final_flags) return final_flags SPACE_OPTS = re.compile('^-[LRuYz]$') NOSPACE_OPTS = re.compile('^-[RL]') 
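# Illustrative sketch, not from the waf sources above: the helper below only
# shows what parse_fortran_link() (defined just before this point) is meant to
# extract from a compiler's verbose link output -- library paths and library
# flags, minus driver chatter and the LINKFLAGS_IGNORED entries. The sample
# lines and the expected result are made up for illustration:
def _example_parse_verbose_output():
	lines = [
		'Driving: gfortran -v test.f -o app',                   # driver line, skipped
		'/usr/bin/ld crt1.o -L/usr/lib/gcc -lgfortran -lm -lc',
	]
	# expected, roughly: ['-L/usr/lib/gcc', '-lgfortran', '-lm'] ('-lc' is ignored)
	return parse_fortran_link(lines)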
def _parse_flink_token(lexer, token, tmp_flags): # Here we go (convention for wildcard is shell, not regex !) # 1 TODO: we first get some root .a libraries # 2 TODO: take everything starting by -bI:* # 3 Ignore the following flags: -lang* | -lcrt*.o | -lc | # -lgcc* | -lSystem | -libmil | -LANG:=* | -LIST:* | -LNO:*) # 4 take into account -lkernel32 # 5 For options of the kind -[[LRuYz]], as they take one argument # after, the actual option is the next token # 6 For -YP,*: take and replace by -Larg where arg is the old # argument # 7 For -[lLR]*: take # step 3 if _match_ignore(token): pass # step 4 elif token.startswith('-lkernel32') and sys.platform == 'cygwin': tmp_flags.append(token) # step 5 elif SPACE_OPTS.match(token): t = lexer.get_token() if t.startswith('P,'): t = t[2:] for opt in t.split(os.pathsep): tmp_flags.append('-L%s' % opt) # step 6 elif NOSPACE_OPTS.match(token): tmp_flags.append(token) # step 7 elif POSIX_LIB_FLAGS.match(token): tmp_flags.append(token) else: # ignore anything not explicitly taken into account pass t = lexer.get_token() return t def _parse_flink_line(line, final_flags): """private""" lexer = shlex.shlex(line, posix = True) lexer.whitespace_split = True t = lexer.get_token() tmp_flags = [] while t: t = _parse_flink_token(lexer, t, tmp_flags) final_flags.extend(tmp_flags) return final_flags @conf def check_fortran_clib(self, autoadd=True, *k, **kw): """ Obtains the flags for linking with the C library if this check works, add uselib='CLIB' to your task generators """ if not self.env.FC_VERBOSE_FLAG: self.fatal('env.FC_VERBOSE_FLAG is not set: execute check_fortran_verbose_flag?') self.start_msg('Getting fortran runtime link flags') try: self.check_cc( fragment = FC_FRAGMENT2, compile_filename = 'test.f', features = 'fc fcprogram_test', linkflags = [self.env.FC_VERBOSE_FLAG] ) except Exception: self.end_msg(False) if kw.get('mandatory', True): conf.fatal('Could not find the c library flags') else: out = self.test_bld.err flags = parse_fortran_link(out.splitlines()) self.end_msg('ok (%s)' % ' '.join(flags)) self.env.LINKFLAGS_CLIB = flags return flags return [] def getoutput(conf, cmd, stdin=False): """ Obtains Fortran command outputs """ from waflib import Errors if conf.env.env: env = conf.env.env else: env = dict(os.environ) env['LANG'] = 'C' input = stdin and '\n'.encode() or None try: out, err = conf.cmd_and_log(cmd, env=env, output=0, input=input) except Errors.WafError as e: # An WafError might indicate an error code during the command # execution, in this case we still obtain the stderr and stdout, # which we can use to find the version string. if not (hasattr(e, 'stderr') and hasattr(e, 'stdout')): raise e else: # Ignore the return code and return the original # stdout and stderr. 
out = e.stdout err = e.stderr except Exception: conf.fatal('could not determine the compiler version %r' % cmd) return (out, err) # ------------------------------------------------------------------------ ROUTINES_CODE = """\ subroutine foobar() return end subroutine foo_bar() return end """ MAIN_CODE = """ void %(dummy_func_nounder)s(void); void %(dummy_func_under)s(void); int %(main_func_name)s() { %(dummy_func_nounder)s(); %(dummy_func_under)s(); return 0; } """ @feature('link_main_routines_func') @before_method('process_source') def link_main_routines_tg_method(self): """ The configuration test declares a unique task generator, so we create other task generators from there for fortran link tests """ def write_test_file(task): task.outputs[0].write(task.generator.code) bld = self.bld bld(rule=write_test_file, target='main.c', code=MAIN_CODE % self.__dict__) bld(rule=write_test_file, target='test.f', code=ROUTINES_CODE) bld(features='fc fcstlib', source='test.f', target='test') bld(features='c fcprogram', source='main.c', target='app', use='test') def mangling_schemes(): """ Generate triplets for use with mangle_name (used in check_fortran_mangling) the order is tuned for gfortan """ for u in ('_', ''): for du in ('', '_'): for c in ("lower", "upper"): yield (u, du, c) def mangle_name(u, du, c, name): """Mangle a name from a triplet (used in check_fortran_mangling)""" return getattr(name, c)() + u + (name.find('_') != -1 and du or '') @conf def check_fortran_mangling(self, *k, **kw): """ Detect the mangling scheme, sets FORTRAN_MANGLING to the triplet found This test will compile a fortran static library, then link a c app against it """ if not self.env.CC: self.fatal('A c compiler is required for link_main_routines') if not self.env.FC: self.fatal('A fortran compiler is required for link_main_routines') if not self.env.FC_MAIN: self.fatal('Checking for mangling requires self.env.FC_MAIN (execute "check_fortran_dummy_main" first?)') self.start_msg('Getting fortran mangling scheme') for (u, du, c) in mangling_schemes(): try: self.check_cc( compile_filename = [], features = 'link_main_routines_func', msg = 'nomsg', errmsg = 'nomsg', dummy_func_nounder = mangle_name(u, du, c, 'foobar'), dummy_func_under = mangle_name(u, du, c, 'foo_bar'), main_func_name = self.env.FC_MAIN ) except self.errors.ConfigurationError: pass else: self.end_msg("ok ('%s', '%s', '%s-case')" % (u, du, c)) self.env.FORTRAN_MANGLING = (u, du, c) break else: self.end_msg(False) self.fatal('mangler not found') return (u, du, c) @feature('pyext') @before_method('propagate_uselib_vars', 'apply_link') def set_lib_pat(self): """Sets the Fortran flags for linking with Python""" self.env.fcshlib_PATTERN = self.env.pyext_PATTERN @conf def detect_openmp(self): """ Detects openmp flags and sets the OPENMP ``FCFLAGS``/``LINKFLAGS`` """ for x in ('-fopenmp','-openmp','-mp','-xopenmp','-omp','-qsmp=omp'): try: self.check_fc( msg = 'Checking for OpenMP flag %s' % x, fragment = 'program main\n call omp_get_num_threads()\nend program main', fcflags = x, linkflags = x, uselib_store = 'OPENMP' ) except self.errors.ConfigurationError: pass else: break else: self.fatal('Could not find OpenMP') @conf def check_gfortran_o_space(self): if self.env.FC_NAME != 'GFORTRAN' or int(self.env.FC_VERSION[0]) > 4: # This is for old compilers and only for gfortran. # No idea how other implementations handle this. Be safe and bail out. 
return self.env.stash() self.env.FCLNK_TGT_F = ['-o', ''] try: self.check_fc(msg='Checking if the -o link must be split from arguments', fragment=FC_FRAGMENT, features='fc fcshlib') except self.errors.ConfigurationError: self.env.revert() else: self.env.commit() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/fc_scan.py0000660000000000000000000000602100000000000022452 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import re INC_REGEX = r"""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])""" USE_REGEX = r"""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)""" MOD_REGEX = r"""(?:^|;)\s*MODULE(?!\s+(?:PROCEDURE|SUBROUTINE|FUNCTION))\s+(\w+)""" SMD_REGEX = r"""(?:^|;)\s*SUBMODULE\s*\(([\w:]+)\)\s*(\w+)""" re_inc = re.compile(INC_REGEX, re.I) re_use = re.compile(USE_REGEX, re.I) re_mod = re.compile(MOD_REGEX, re.I) re_smd = re.compile(SMD_REGEX, re.I) class fortran_parser(object): """ This parser returns: * the nodes corresponding to the module names to produce * the nodes corresponding to the include files used * the module names used by the fortran files """ def __init__(self, incpaths): self.seen = [] """Files already parsed""" self.nodes = [] """List of :py:class:`waflib.Node.Node` representing the dependencies to return""" self.names = [] """List of module names to return""" self.incpaths = incpaths """List of :py:class:`waflib.Node.Node` representing the include paths""" def find_deps(self, node): """ Parses a Fortran file to obtain the dependencies used/provided :param node: fortran file to read :type node: :py:class:`waflib.Node.Node` :return: lists representing the includes, the modules used, and the modules created by a fortran file :rtype: tuple of list of strings """ txt = node.read() incs = [] uses = [] mods = [] for line in txt.splitlines(): # line by line regexp search? optimize? m = re_inc.search(line) if m: incs.append(m.group(1)) m = re_use.search(line) if m: uses.append(m.group(1)) m = re_mod.search(line) if m: mods.append(m.group(1)) m = re_smd.search(line) if m: uses.append(m.group(1)) mods.append('{0}:{1}'.format(m.group(1),m.group(2))) return (incs, uses, mods) def start(self, node): """ Start parsing. Use the stack ``self.waiting`` to hold nodes to iterate on :param node: fortran file :type node: :py:class:`waflib.Node.Node` """ self.waiting = [node] while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): """ Processes a single file during dependency parsing. Extracts files used modules used and modules provided. 
""" incs, uses, mods = self.find_deps(node) for x in incs: if x in self.seen: continue self.seen.append(x) self.tryfind_header(x) for x in uses: name = "USE@%s" % x if not name in self.names: self.names.append(name) for x in mods: name = "MOD@%s" % x if not name in self.names: self.names.append(name) def tryfind_header(self, filename): """ Adds an include file to the list of nodes to process :param filename: file name :type filename: string """ found = None for n in self.incpaths: found = n.find_resource(filename) if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/flex.py0000660000000000000000000000302100000000000022011 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy, 2006-2018 (ita) """ The **flex** program is a code generator which creates C or C++ files. The generated files are compiled into object files. """ import os, re from waflib import Task, TaskGen from waflib.Tools import ccroot def decide_ext(self, node): if 'cxx' in self.features: return ['.lex.cc'] return ['.lex.c'] def flexfun(tsk): env = tsk.env bld = tsk.generator.bld wd = bld.variant_dir def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = lst = [] lst.extend(to_list(env.FLEX)) lst.extend(to_list(env.FLEXFLAGS)) inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] if env.FLEX_MSYS: inputs = [x.replace(os.sep, '/') for x in inputs] lst.extend(inputs) lst = [x for x in lst if x] txt = bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 TaskGen.declare_chain( name = 'flex', rule = flexfun, # issue #854 ext_in = '.l', decider = decide_ext, ) # To support the following: # bld(features='c', flexflags='-P/foo') Task.classes['flex'].vars = ['FLEXFLAGS', 'FLEX'] ccroot.USELIB_VARS['c'].add('FLEXFLAGS') ccroot.USELIB_VARS['cxx'].add('FLEXFLAGS') def configure(conf): """ Detect the *flex* program """ conf.find_program('flex', var='FLEX') conf.env.FLEXFLAGS = ['-t'] if re.search (r"\\msys\\[0-9.]+\\bin\\flex.exe$", conf.env.FLEX[0]): # this is the flex shipped with MSYS conf.env.FLEX_MSYS = True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/g95.py0000660000000000000000000000276200000000000021472 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # KWS 2010 # Thomas Nagy 2016-2018 (ita) import re from waflib import Utils from waflib.Tools import fc, fc_config, fc_scan, ar from waflib.Configure import conf @conf def find_g95(conf): fc = conf.find_program('g95', var='FC') conf.get_g95_version(fc) conf.env.FC_NAME = 'G95' @conf def g95_flags(conf): v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.FORTRANMODFLAG = ['-fmod=', ''] # template for module path v.FCFLAGS_DEBUG = ['-Werror'] # why not @conf def g95_modifier_win32(conf): fc_config.fortran_modifier_win32(conf) @conf def g95_modifier_cygwin(conf): fc_config.fortran_modifier_cygwin(conf) @conf def g95_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def g95_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() g95_modifier_func = getattr(conf, 'g95_modifier_' + dest_os, None) if g95_modifier_func: g95_modifier_func() @conf def get_g95_version(conf, fc): """get the compiler version""" version_re = re.compile(r"g95\s*(?P\d*)\.(?P\d*)").search cmd = fc + ['--version'] out, err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('cannot determine g95 version') k = match.groupdict() conf.env.FC_VERSION = (k['major'], k['minor']) def configure(conf): conf.find_g95() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.g95_flags() conf.g95_modifier_platform() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/Tools/gas.py0000660000000000000000000000070000000000000021626 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) "Detect as/gas/gcc for compiling assembly files" import waflib.Tools.asm # - leave this from waflib.Tools import ar def configure(conf): """ Find the programs gas/as/gcc and set the variable *AS* """ conf.find_program(['gas', 'gcc'], var='AS') conf.env.AS_TGT_F = ['-c', '-o'] conf.env.ASLNK_TGT_F = ['-o'] conf.find_ar() conf.load('asm') conf.env.ASM_NAME = 'gas' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/gcc.py0000660000000000000000000000770100000000000021620 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ gcc/llvm detection. 
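This tool is usually loaded indirectly through ``conf.load('compiler_c')``, which tries
the known C compilers in turn; loading ``'gcc'`` directly also works.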
""" from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_gcc(conf): """ Find the program gcc, and if present, try to detect its version number """ cc = conf.find_program(['gcc', 'cc'], var='CC') conf.get_cc_version(cc, gcc=True) conf.env.CC_NAME = 'gcc' @conf def gcc_common_flags(conf): """ Common flags for gcc on nearly all platforms """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o'] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = [] v.CCLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Wl,-Bdynamic' v.STLIB_MARKER = '-Wl,-Bstatic' v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-shared'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = ['-Wl,-Bstatic'] v.cstlib_PATTERN = 'lib%s.a' v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] v.CFLAGS_MACBUNDLE = ['-fPIC'] v.macbundle_PATTERN = '%s.bundle' @conf def gcc_modifier_win32(conf): """Configuration flags for executing gcc on Windows""" v = conf.env v.cprogram_PATTERN = '%s.exe' v.cshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.CFLAGS_cshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def gcc_modifier_cygwin(conf): """Configuration flags for executing gcc on Cygwin""" gcc_modifier_win32(conf) v = conf.env v.cshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base']) v.CFLAGS_cshlib = [] @conf def gcc_modifier_darwin(conf): """Configuration flags for executing gcc on MacOS""" v = conf.env v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-dynamiclib'] v.cshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gcc_modifier_aix(conf): """Configuration flags for executing gcc on AIX""" v = conf.env v.LINKFLAGS_cprogram = ['-Wl,-brtl'] v.LINKFLAGS_cshlib = ['-shared','-Wl,-brtl,-bexpfull'] v.SHLIB_MARKER = [] @conf def gcc_modifier_hpux(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.CFLAGS_cshlib = ['-fPIC','-DPIC'] v.cshlib_PATTERN = 'lib%s.sl' @conf def gcc_modifier_openbsd(conf): conf.env.SONAME_ST = [] @conf def gcc_modifier_osf1V(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gcc_modifier_platform(conf): """Execute platform-specific functions based on *gcc_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. 
gcc_modifier_func = getattr(conf, 'gcc_modifier_' + conf.env.DEST_OS, None) if gcc_modifier_func: gcc_modifier_func() def configure(conf): """ Configuration for gcc """ conf.find_gcc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() conf.check_gcc_o_space() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/gdc.py0000660000000000000000000000212300000000000021612 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_gdc(conf): """ Finds the program gdc and set the variable *D* """ conf.find_program('gdc', var='D') out = conf.cmd_and_log(conf.env.D + ['--version']) if out.find("gdc") == -1: conf.fatal("detected compiler is not gdc") @conf def common_flags_gdc(conf): """ Sets the flags required by *gdc* """ v = conf.env v.DFLAGS = [] v.D_SRC_F = ['-c'] v.D_TGT_F = '-o%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-o%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L%s' v.LINKFLAGS_dshlib = ['-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = '-fintfc' v.D_HDR_F = '-fintfc-file=%s' def configure(conf): """ Configuration for gdc """ conf.find_gdc() conf.load('ar') conf.load('d') conf.common_flags_gdc() conf.d_platform_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/gfortran.py0000660000000000000000000000442200000000000022703 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import re from waflib import Utils from waflib.Tools import fc, fc_config, fc_scan, ar from waflib.Configure import conf @conf def find_gfortran(conf): """Find the gfortran program (will look in the environment variable 'FC')""" fc = conf.find_program(['gfortran','g77'], var='FC') # (fallback to g77 for systems, where no gfortran is available) conf.get_gfortran_version(fc) conf.env.FC_NAME = 'GFORTRAN' @conf def gfortran_flags(conf): v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.FORTRANMODFLAG = ['-J', ''] # template for module path v.FCFLAGS_DEBUG = ['-Werror'] # why not @conf def gfortran_modifier_win32(conf): fc_config.fortran_modifier_win32(conf) @conf def gfortran_modifier_cygwin(conf): fc_config.fortran_modifier_cygwin(conf) @conf def gfortran_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def gfortran_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() gfortran_modifier_func = getattr(conf, 'gfortran_modifier_' + dest_os, None) if gfortran_modifier_func: gfortran_modifier_func() @conf def get_gfortran_version(conf, fc): """Get the compiler version""" # ensure this is actually gfortran, not an imposter. 
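	# Two steps: first confirm that the --version banner mentions "GNU Fortran",
	# then (below) run '-dM -E -' to dump the GCC predefines and read the exact
	# __GNUC__/__GNUC_MINOR__/__GNUC_PATCHLEVEL__ values.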
version_re = re.compile(r"GNU\s*Fortran", re.I).search cmd = fc + ['--version'] out, err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the compiler type') # --- now get more detailed info -- see c_config.get_cc_version cmd = fc + ['-dM', '-E', '-'] out, err = fc_config.getoutput(conf, cmd, stdin=True) if out.find('__GNUC__') < 0: conf.fatal('Could not determine the compiler type') k = {} out = out.splitlines() import shlex for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k def isT(var): return var in k and k[var] != '0' conf.env.FC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__']) def configure(conf): conf.find_gfortran() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.gfortran_flags() conf.gfortran_modifier_platform() conf.check_gfortran_o_space() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/glib2.py0000660000000000000000000003663300000000000022071 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Support for GLib2 tools: * marshal * enums * gsettings * gresource """ import os import functools from waflib import Context, Task, Utils, Options, Errors, Logs from waflib.TaskGen import taskgen_method, before_method, feature, extension from waflib.Configure import conf ################## marshal files @taskgen_method def add_marshal_file(self, filename, prefix): """ Adds a file to the list of marshal files to process. Store them in the attribute *marshal_list*. :param filename: xml file to compile :type filename: string :param prefix: marshal prefix (--prefix=prefix) :type prefix: string """ if not hasattr(self, 'marshal_list'): self.marshal_list = [] self.meths.append('process_marshal') self.marshal_list.append((filename, prefix)) @before_method('process_source') def process_marshal(self): """ Processes the marshal files stored in the attribute *marshal_list* to create :py:class:`waflib.Tools.glib2.glib_genmarshal` instances. Adds the c file created to the list of source to process. 
""" for f, prefix in getattr(self, 'marshal_list', []): node = self.path.find_resource(f) if not node: raise Errors.WafError('file not found %r' % f) h_node = node.change_ext('.h') c_node = node.change_ext('.c') task = self.create_task('glib_genmarshal', node, [h_node, c_node]) task.env.GLIB_GENMARSHAL_PREFIX = prefix self.source = self.to_nodes(getattr(self, 'source', [])) self.source.append(c_node) class glib_genmarshal(Task.Task): vars = ['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL'] color = 'BLUE' ext_out = ['.h'] def run(self): bld = self.generator.bld get = self.env.get_flat cmd1 = "%s %s --prefix=%s --header > %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[0].abspath() ) ret = bld.exec_command(cmd1) if ret: return ret #print self.outputs[1].abspath() c = '''#include "%s"\n''' % self.outputs[0].name self.outputs[1].write(c) cmd2 = "%s %s --prefix=%s --body >> %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[1].abspath() ) return bld.exec_command(cmd2) ########################## glib-mkenums @taskgen_method def add_enums_from_template(self, source='', target='', template='', comments=''): """ Adds a file to the list of enum files to process. Stores them in the attribute *enums_list*. :param source: enum file to process :type source: string :param target: target file :type target: string :param template: template file :type template: string :param comments: comments :type comments: string """ if not hasattr(self, 'enums_list'): self.enums_list = [] self.meths.append('process_enums') self.enums_list.append({'source': source, 'target': target, 'template': template, 'file-head': '', 'file-prod': '', 'file-tail': '', 'enum-prod': '', 'value-head': '', 'value-prod': '', 'value-tail': '', 'comments': comments}) @taskgen_method def add_enums(self, source='', target='', file_head='', file_prod='', file_tail='', enum_prod='', value_head='', value_prod='', value_tail='', comments=''): """ Adds a file to the list of enum files to process. Stores them in the attribute *enums_list*. :param source: enum file to process :type source: string :param target: target file :type target: string :param file_head: unused :param file_prod: unused :param file_tail: unused :param enum_prod: unused :param value_head: unused :param value_prod: unused :param value_tail: unused :param comments: comments :type comments: string """ if not hasattr(self, 'enums_list'): self.enums_list = [] self.meths.append('process_enums') self.enums_list.append({'source': source, 'template': '', 'target': target, 'file-head': file_head, 'file-prod': file_prod, 'file-tail': file_tail, 'enum-prod': enum_prod, 'value-head': value_head, 'value-prod': value_prod, 'value-tail': value_tail, 'comments': comments}) @before_method('process_source') def process_enums(self): """ Processes the enum files stored in the attribute *enum_list* to create :py:class:`waflib.Tools.glib2.glib_mkenums` instances. 
""" for enum in getattr(self, 'enums_list', []): task = self.create_task('glib_mkenums') env = task.env inputs = [] # process the source source_list = self.to_list(enum['source']) if not source_list: raise Errors.WafError('missing source ' + str(enum)) source_list = [self.path.find_resource(k) for k in source_list] inputs += source_list env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list] # find the target if not enum['target']: raise Errors.WafError('missing target ' + str(enum)) tgt_node = self.path.find_or_declare(enum['target']) if tgt_node.name.endswith('.c'): self.source.append(tgt_node) env.GLIB_MKENUMS_TARGET = tgt_node.abspath() options = [] if enum['template']: # template, if provided template_node = self.path.find_resource(enum['template']) options.append('--template %s' % (template_node.abspath())) inputs.append(template_node) params = {'file-head' : '--fhead', 'file-prod' : '--fprod', 'file-tail' : '--ftail', 'enum-prod' : '--eprod', 'value-head' : '--vhead', 'value-prod' : '--vprod', 'value-tail' : '--vtail', 'comments': '--comments'} for param, option in params.items(): if enum[param]: options.append('%s %r' % (option, enum[param])) env.GLIB_MKENUMS_OPTIONS = ' '.join(options) # update the task instance task.set_inputs(inputs) task.set_outputs(tgt_node) class glib_mkenums(Task.Task): """ Processes enum files """ run_str = '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}' color = 'PINK' ext_out = ['.h'] ######################################### gsettings @taskgen_method def add_settings_schemas(self, filename_list): """ Adds settings files to process to *settings_schema_files* :param filename_list: files :type filename_list: list of string """ if not hasattr(self, 'settings_schema_files'): self.settings_schema_files = [] if not isinstance(filename_list, list): filename_list = [filename_list] self.settings_schema_files.extend(filename_list) @taskgen_method def add_settings_enums(self, namespace, filename_list): """ Called only once by task generator to set the enums namespace. :param namespace: namespace :type namespace: string :param filename_list: enum files to process :type filename_list: file list """ if hasattr(self, 'settings_enum_namespace'): raise Errors.WafError("Tried to add gsettings enums to %r more than once" % self.name) self.settings_enum_namespace = namespace if not isinstance(filename_list, list): filename_list = [filename_list] self.settings_enum_files = filename_list @feature('glib2') def process_settings(self): """ Processes the schema files in *settings_schema_files* to create :py:class:`waflib.Tools.glib2.glib_mkenums` instances. The same files are validated through :py:class:`waflib.Tools.glib2.glib_validate_schema` tasks. """ enums_tgt_node = [] install_files = [] settings_schema_files = getattr(self, 'settings_schema_files', []) if settings_schema_files and not self.env.GLIB_COMPILE_SCHEMAS: raise Errors.WafError ("Unable to process GSettings schemas - glib-compile-schemas was not found during configure") # 1. 
process gsettings_enum_files (generate .enums.xml) # if hasattr(self, 'settings_enum_files'): enums_task = self.create_task('glib_mkenums') source_list = self.settings_enum_files source_list = [self.path.find_resource(k) for k in source_list] enums_task.set_inputs(source_list) enums_task.env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list] target = self.settings_enum_namespace + '.enums.xml' tgt_node = self.path.find_or_declare(target) enums_task.set_outputs(tgt_node) enums_task.env.GLIB_MKENUMS_TARGET = tgt_node.abspath() enums_tgt_node = [tgt_node] install_files.append(tgt_node) options = '--comments "" --fhead "" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " " --vtail " " --ftail "" ' % (self.settings_enum_namespace) enums_task.env.GLIB_MKENUMS_OPTIONS = options # 2. process gsettings_schema_files (validate .gschema.xml files) # for schema in settings_schema_files: schema_task = self.create_task ('glib_validate_schema') schema_node = self.path.find_resource(schema) if not schema_node: raise Errors.WafError("Cannot find the schema file %r" % schema) install_files.append(schema_node) source_list = enums_tgt_node + [schema_node] schema_task.set_inputs (source_list) schema_task.env.GLIB_COMPILE_SCHEMAS_OPTIONS = [("--schema-file=" + k.abspath()) for k in source_list] target_node = schema_node.change_ext('.xml.valid') schema_task.set_outputs (target_node) schema_task.env.GLIB_VALIDATE_SCHEMA_OUTPUT = target_node.abspath() # 3. schemas install task def compile_schemas_callback(bld): if not bld.is_install: return compile_schemas = Utils.to_list(bld.env.GLIB_COMPILE_SCHEMAS) destdir = Options.options.destdir paths = bld._compile_schemas_registered if destdir: paths = (os.path.join(destdir, path.lstrip(os.sep)) for path in paths) for path in paths: Logs.pprint('YELLOW', 'Updating GSettings schema cache %r' % path) if self.bld.exec_command(compile_schemas + [path]): Logs.warn('Could not update GSettings schema cache %r' % path) if self.bld.is_install: schemadir = self.env.GSETTINGSSCHEMADIR if not schemadir: raise Errors.WafError ('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)') if install_files: self.add_install_files(install_to=schemadir, install_from=install_files) registered_schemas = getattr(self.bld, '_compile_schemas_registered', None) if not registered_schemas: registered_schemas = self.bld._compile_schemas_registered = set() self.bld.add_post_fun(compile_schemas_callback) registered_schemas.add(schemadir) class glib_validate_schema(Task.Task): """ Validates schema files """ run_str = 'rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}' color = 'PINK' ################## gresource @extension('.gresource.xml') def process_gresource_source(self, node): """ Creates tasks that turn ``.gresource.xml`` files to C code """ if not self.env.GLIB_COMPILE_RESOURCES: raise Errors.WafError ("Unable to process GResource file - glib-compile-resources was not found during configure") if 'gresource' in self.features: return h_node = node.change_ext('_xml.h') c_node = node.change_ext('_xml.c') self.create_task('glib_gresource_source', node, [h_node, c_node]) self.source.append(c_node) @feature('gresource') def process_gresource_bundle(self): """ Creates tasks to turn ``.gresource`` files from ``.gresource.xml`` files:: def build(bld): bld( features='gresource', source=['resources1.gresource.xml', 'resources2.gresource.xml'], 
install_path='${LIBDIR}/${PACKAGE}' ) :param source: XML files to process :type source: list of string :param install_path: installation path :type install_path: string """ for i in self.to_list(self.source): node = self.path.find_resource(i) task = self.create_task('glib_gresource_bundle', node, node.change_ext('')) inst_to = getattr(self, 'install_path', None) if inst_to: self.add_install_files(install_to=inst_to, install_from=task.outputs) class glib_gresource_base(Task.Task): """ Base class for gresource based tasks """ color = 'BLUE' base_cmd = '${GLIB_COMPILE_RESOURCES} --sourcedir=${SRC[0].parent.srcpath()} --sourcedir=${SRC[0].bld_dir()}' def scan(self): """ Scans gresource dependencies through ``glib-compile-resources --generate-dependencies command`` """ bld = self.generator.bld kw = {} kw['cwd'] = self.get_cwd() kw['quiet'] = Context.BOTH cmd = Utils.subst_vars('${GLIB_COMPILE_RESOURCES} --sourcedir=%s --sourcedir=%s --generate-dependencies %s' % ( self.inputs[0].parent.srcpath(), self.inputs[0].bld_dir(), self.inputs[0].bldpath() ), self.env) output = bld.cmd_and_log(cmd, **kw) nodes = [] names = [] for dep in output.splitlines(): if dep: node = bld.bldnode.find_node(dep) if node: nodes.append(node) else: names.append(dep) return (nodes, names) class glib_gresource_source(glib_gresource_base): """ Task to generate C source code (.h and .c files) from a gresource.xml file """ vars = ['GLIB_COMPILE_RESOURCES'] fun_h = Task.compile_fun_shell(glib_gresource_base.base_cmd + ' --target=${TGT[0].abspath()} --generate-header ${SRC}') fun_c = Task.compile_fun_shell(glib_gresource_base.base_cmd + ' --target=${TGT[1].abspath()} --generate-source ${SRC}') ext_out = ['.h'] def run(self): return self.fun_h[0](self) or self.fun_c[0](self) class glib_gresource_bundle(glib_gresource_base): """ Task to generate a .gresource binary file from a gresource.xml file """ run_str = glib_gresource_base.base_cmd + ' --target=${TGT} ${SRC}' shell = True # temporary workaround for #795 @conf def find_glib_genmarshal(conf): conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL') @conf def find_glib_mkenums(conf): if not conf.env.PERL: conf.find_program('perl', var='PERL') conf.find_program('glib-mkenums', interpreter='PERL', var='GLIB_MKENUMS') @conf def find_glib_compile_schemas(conf): # when cross-compiling, gsettings.m4 locates the program with the following: # pkg-config --variable glib_compile_schemas gio-2.0 conf.find_program('glib-compile-schemas', var='GLIB_COMPILE_SCHEMAS') def getstr(varname): return getattr(Options.options, varname, getattr(conf.env,varname, '')) gsettingsschemadir = getstr('GSETTINGSSCHEMADIR') if not gsettingsschemadir: datadir = getstr('DATADIR') if not datadir: prefix = conf.env.PREFIX datadir = os.path.join(prefix, 'share') gsettingsschemadir = os.path.join(datadir, 'glib-2.0', 'schemas') conf.env.GSETTINGSSCHEMADIR = gsettingsschemadir @conf def find_glib_compile_resources(conf): conf.find_program('glib-compile-resources', var='GLIB_COMPILE_RESOURCES') def configure(conf): """ Finds the following programs: * *glib-genmarshal* and set *GLIB_GENMARSHAL* * *glib-mkenums* and set *GLIB_MKENUMS* * *glib-compile-schemas* and set *GLIB_COMPILE_SCHEMAS* (not mandatory) * *glib-compile-resources* and set *GLIB_COMPILE_RESOURCES* (not mandatory) """ conf.find_glib_genmarshal() conf.find_glib_mkenums() conf.find_glib_compile_schemas(mandatory=False) conf.find_glib_compile_resources(mandatory=False) def options(opt): """ Adds the ``--gsettingsschemadir`` command-line option """ gr 
= opt.add_option_group('Installation directories') gr.add_option('--gsettingsschemadir', help='GSettings schema location [DATADIR/glib-2.0/schemas]', default='', dest='GSETTINGSSCHEMADIR') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/gnu_dirs.py0000660000000000000000000001207600000000000022677 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 """ Sets various standard variables such as INCLUDEDIR. SBINDIR and others. To use this module just call:: opt.load('gnu_dirs') and:: conf.load('gnu_dirs') Add options for the standard GNU directories, this tool will add the options found in autotools, and will update the environment with the following installation variables: ============== ========================================= ======================= Variable Description Default Value ============== ========================================= ======================= PREFIX installation prefix /usr/local EXEC_PREFIX installation prefix for binaries PREFIX BINDIR user commands EXEC_PREFIX/bin SBINDIR system binaries EXEC_PREFIX/sbin LIBEXECDIR program-specific binaries EXEC_PREFIX/libexec SYSCONFDIR host-specific configuration PREFIX/etc SHAREDSTATEDIR architecture-independent variable data PREFIX/com LOCALSTATEDIR variable data PREFIX/var LIBDIR object code libraries EXEC_PREFIX/lib INCLUDEDIR header files PREFIX/include OLDINCLUDEDIR header files for non-GCC compilers /usr/include DATAROOTDIR architecture-independent data root PREFIX/share DATADIR architecture-independent data DATAROOTDIR INFODIR GNU "info" documentation DATAROOTDIR/info LOCALEDIR locale-dependent data DATAROOTDIR/locale MANDIR manual pages DATAROOTDIR/man DOCDIR documentation root DATAROOTDIR/doc/APPNAME HTMLDIR HTML documentation DOCDIR DVIDIR DVI documentation DOCDIR PDFDIR PDF documentation DOCDIR PSDIR PostScript documentation DOCDIR ============== ========================================= ======================= """ import os, re from waflib import Utils, Options, Context gnuopts = ''' bindir, user commands, ${EXEC_PREFIX}/bin sbindir, system binaries, ${EXEC_PREFIX}/sbin libexecdir, program-specific binaries, ${EXEC_PREFIX}/libexec sysconfdir, host-specific configuration, ${PREFIX}/etc sharedstatedir, architecture-independent variable data, ${PREFIX}/com localstatedir, variable data, ${PREFIX}/var libdir, object code libraries, ${EXEC_PREFIX}/lib%s includedir, header files, ${PREFIX}/include oldincludedir, header files for non-GCC compilers, /usr/include datarootdir, architecture-independent data root, ${PREFIX}/share datadir, architecture-independent data, ${DATAROOTDIR} infodir, GNU "info" documentation, ${DATAROOTDIR}/info localedir, locale-dependent data, ${DATAROOTDIR}/locale mandir, manual pages, ${DATAROOTDIR}/man docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE} htmldir, HTML documentation, ${DOCDIR} dvidir, DVI documentation, ${DOCDIR} pdfdir, PDF documentation, ${DOCDIR} psdir, PostScript documentation, ${DOCDIR} ''' % Utils.lib64() _options = [x.split(', ') for x in gnuopts.splitlines() if x] def configure(conf): """ Reads the command-line options to set lots of variables in *conf.env*. The variables BINDIR and LIBDIR will be overwritten. 
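	A typical, illustrative use (the tool must be loaded in both phases)::

		def options(opt):
			opt.load('gnu_dirs')

		def configure(conf):
			conf.load('gnu_dirs')
			# conf.env.LIBDIR, conf.env.SYSCONFDIR, etc. are now populated

	Individual directories can be overridden on the command line, for example
	``--libdir=/opt/foo/lib64``.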
""" def get_param(varname, default): return getattr(Options.options, varname, '') or default env = conf.env env.LIBDIR = env.BINDIR = [] env.EXEC_PREFIX = get_param('EXEC_PREFIX', env.PREFIX) env.PACKAGE = getattr(Context.g_module, 'APPNAME', None) or env.PACKAGE complete = False iter = 0 while not complete and iter < len(_options) + 1: iter += 1 complete = True for name, help, default in _options: name = name.upper() if not env[name]: try: env[name] = Utils.subst_vars(get_param(name, default).replace('/', os.sep), env) except TypeError: complete = False if not complete: lst = [x for x, _, _ in _options if not env[x.upper()]] raise conf.errors.WafError('Variable substitution failure %r' % lst) def options(opt): """ Adds lots of command-line options, for example:: --exec-prefix: EXEC_PREFIX """ inst_dir = opt.add_option_group('Installation prefix', 'By default, "waf install" will put the files in\ "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\ than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"') for k in ('--prefix', '--destdir'): option = opt.parser.get_option(k) if option: opt.parser.remove_option(k) inst_dir.add_option(option) inst_dir.add_option('--exec-prefix', help = 'installation prefix for binaries [PREFIX]', default = '', dest = 'EXEC_PREFIX') dirs_options = opt.add_option_group('Installation directories') for name, help, default in _options: option_name = '--' + name str_default = default str_help = '%s [%s]' % (help, re.sub(r'\$\{([^}]+)\}', r'\1', str_default)) dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/gxx.py0000660000000000000000000000774000000000000021675 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ g++/llvm detection. 
""" from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_gxx(conf): """ Finds the program g++, and if present, try to detect its version number """ cxx = conf.find_program(['g++', 'c++'], var='CXX') conf.get_cc_version(cxx, gcc=True) conf.env.CXX_NAME = 'gcc' @conf def gxx_common_flags(conf): """ Common flags for g++ on nearly all platforms """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o'] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Wl,-Bdynamic' v.STLIB_MARKER = '-Wl,-Bstatic' v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-shared'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = ['-Wl,-Bstatic'] v.cxxstlib_PATTERN = 'lib%s.a' v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] v.CXXFLAGS_MACBUNDLE = ['-fPIC'] v.macbundle_PATTERN = '%s.bundle' @conf def gxx_modifier_win32(conf): """Configuration flags for executing gcc on Windows""" v = conf.env v.cxxprogram_PATTERN = '%s.exe' v.cxxshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.CXXFLAGS_cxxshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def gxx_modifier_cygwin(conf): """Configuration flags for executing g++ on Cygwin""" gxx_modifier_win32(conf) v = conf.env v.cxxshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base']) v.CXXFLAGS_cxxshlib = [] @conf def gxx_modifier_darwin(conf): """Configuration flags for executing g++ on MacOS""" v = conf.env v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-dynamiclib'] v.cxxshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cxxstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gxx_modifier_aix(conf): """Configuration flags for executing g++ on AIX""" v = conf.env v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] v.LINKFLAGS_cxxshlib = ['-shared', '-Wl,-brtl,-bexpfull'] v.SHLIB_MARKER = [] @conf def gxx_modifier_hpux(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.CFLAGS_cxxshlib = ['-fPIC','-DPIC'] v.cxxshlib_PATTERN = 'lib%s.sl' @conf def gxx_modifier_openbsd(conf): conf.env.SONAME_ST = [] @conf def gcc_modifier_osf1V(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gxx_modifier_platform(conf): """Execute platform-specific functions based on *gxx_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. 
gxx_modifier_func = getattr(conf, 'gxx_modifier_' + conf.env.DEST_OS, None) if gxx_modifier_func: gxx_modifier_func() def configure(conf): """ Configuration for g++ """ conf.find_gxx() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() conf.check_gcc_o_space('cxx') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/icc.py0000660000000000000000000000113400000000000021614 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Stian Selnes 2008 # Thomas Nagy 2009-2018 (ita) """ Detects the Intel C compiler """ import sys from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf @conf def find_icc(conf): """ Finds the program icc and execute it to ensure it really is icc """ cc = conf.find_program(['icc', 'ICL'], var='CC') conf.get_cc_version(cc, icc=True) conf.env.CC_NAME = 'icc' def configure(conf): conf.find_icc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/icpc.py0000660000000000000000000000111600000000000021774 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) """ Detects the Intel C++ compiler """ import sys from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf @conf def find_icpc(conf): """ Finds the program icpc, and execute it to ensure it really is icpc """ cxx = conf.find_program('icpc', var='CXX') conf.get_cc_version(cxx, icc=True) conf.env.CXX_NAME = 'icc' def configure(conf): conf.find_icpc() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/ifort.py0000660000000000000000000003027100000000000022205 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import os, re, traceback from waflib import Utils, Logs, Errors from waflib.Tools import fc, fc_config, fc_scan, ar, ccroot from waflib.Configure import conf from waflib.TaskGen import after_method, feature @conf def find_ifort(conf): fc = conf.find_program('ifort', var='FC') conf.get_ifort_version(fc) conf.env.FC_NAME = 'IFORT' @conf def ifort_modifier_win32(self): v = self.env v.IFORT_WIN32 = True v.FCSTLIB_MARKER = '' v.FCSHLIB_MARKER = '' v.FCLIB_ST = v.FCSTLIB_ST = '%s.lib' v.FCLIBPATH_ST = v.STLIBPATH_ST = '/LIBPATH:%s' v.FCINCPATH_ST = '/I%s' v.FCDEFINES_ST = '/D%s' v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' v.fcshlib_PATTERN = '%s.dll' v.fcstlib_PATTERN = v.implib_PATTERN = '%s.lib' v.FCLNK_TGT_F = '/out:' v.FC_TGT_F = ['/c', '/o', ''] v.FCFLAGS_fcshlib = '' v.LINKFLAGS_fcshlib = '/DLL' v.AR_TGT_F = '/out:' v.IMPLIB_ST = '/IMPLIB:%s' v.append_value('LINKFLAGS', '/subsystem:console') if v.IFORT_MANIFEST: v.append_value('LINKFLAGS', ['/MANIFEST']) @conf def ifort_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def ifort_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() ifort_modifier_func = getattr(conf, 'ifort_modifier_' + dest_os, None) if ifort_modifier_func: ifort_modifier_func() @conf def get_ifort_version(conf, fc): """ Detects the compiler version and sets ``conf.env.FC_VERSION`` """ version_re = re.compile(r"\bIntel\b.*\bVersion\s*(?P\d*)\.(?P\d*)",re.I).search if Utils.is_win32: cmd = fc else: cmd = fc + ['-logo'] out, err = fc_config.getoutput(conf, cmd, stdin=False) match = version_re(out) or version_re(err) if not match: conf.fatal('cannot determine ifort version.') k = match.groupdict() conf.env.FC_VERSION = (k['major'], k['minor']) def configure(conf): """ Detects the Intel Fortran compilers """ if Utils.is_win32: compiler, version, path, includes, libdirs, arch = conf.detect_ifort() v = conf.env v.DEST_CPU = arch v.PATH = path v.INCLUDES = includes v.LIBPATH = libdirs v.MSVC_COMPILER = compiler try: v.MSVC_VERSION = float(version) except ValueError: v.MSVC_VERSION = float(version[:-3]) conf.find_ifort_win32() conf.ifort_modifier_win32() else: conf.find_ifort() conf.find_program('xiar', var='AR') conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.ifort_modifier_platform() all_ifort_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] """List of icl platforms""" @conf def gather_ifort_versions(conf, versions): """ List compiler versions by looking up registry keys """ version_pattern = re.compile(r'^...?.?\....?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\Fortran') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\Fortran') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_ifort_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version) path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','ifortvars.bat') if os.path.isfile(batch_file): targets[target] = 
target_compiler(conf, 'intel', arch, version, target, batch_file) for target,arch in all_ifort_platforms: try: icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: continue else: batch_file=os.path.join(path,'bin','ifortvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) major = version[0:2] versions['intel ' + major] = targets @conf def setup_ifort(conf, versiondict): """ Checks installed compilers and targets and returns the first combination from the user's options, env, or the global supported lists that checks. :param versiondict: dict(platform -> dict(architecture -> configuration)) :type versiondict: dict(string -> dict(string -> target_compiler) :return: the compiler, revision, path, include dirs, library paths and target architecture :rtype: tuple of strings """ platforms = Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_ifort_platforms] desired_versions = conf.env.MSVC_VERSIONS or list(reversed(list(versiondict.keys()))) for version in desired_versions: try: targets = versiondict[version] except KeyError: continue for arch in platforms: try: cfg = targets[arch] except KeyError: continue cfg.evaluate() if cfg.is_valid: compiler,revision = version.rsplit(' ', 1) return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu conf.fatal('ifort: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) @conf def get_ifort_version_win32(conf, compiler, version, target, vcvars): # FIXME hack try: conf.msvc_cnt += 1 except AttributeError: conf.msvc_cnt = 1 batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) batfile.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%%;%%LIBPATH%% """ % (vcvars,target)) sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()]) batfile.delete() lines = sout.splitlines() if not lines[0]: lines.pop(0) MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None for line in lines: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): conf.fatal('ifort: Could not find a valid architecture for building (get_ifort_version_win32)') # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = dict(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) fc = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. 
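	# (cl.exe and compatible drivers prepend the contents of the CL environment
	# variable to their command line, so a stray value could silently alter the
	# configuration tests)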
if 'CL' in env: del(env['CL']) try: conf.cmd_and_log(fc + ['/help'], env=env) except UnicodeError: st = traceback.format_exc() if conf.logger: conf.logger.error(st) conf.fatal('ifort: Unicode error - check the code page?') except Exception as e: Logs.debug('ifort: get_ifort_version: %r %r %r -> failure %s', compiler, version, target, str(e)) conf.fatal('ifort: cannot run the compiler in get_ifort_version (run with -v to display errors)') else: Logs.debug('ifort: get_ifort_version: %r %r %r -> OK', compiler, version, target) finally: conf.env[compiler_name] = '' return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) class target_compiler(object): """ Wraps a compiler configuration; call evaluate() to determine whether the configuration is usable. """ def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): """ :param ctx: configuration context to use to eventually get the version environment :param compiler: compiler name :param cpu: target cpu :param version: compiler version number :param bat_target: ? :param bat: path to the batch file to run :param callback: optional function to take the realized environment variables tup and map it (e.g. to combine other constant paths) """ self.conf = ctx self.name = None self.is_valid = False self.is_done = False self.compiler = compiler self.cpu = cpu self.version = version self.bat_target = bat_target self.bat = bat self.callback = callback def evaluate(self): if self.is_done: return self.is_done = True try: vs = self.conf.get_ifort_version_win32(self.compiler, self.version, self.bat_target, self.bat) except Errors.ConfigurationError: self.is_valid = False return if self.callback: vs = self.callback(self, vs) self.is_valid = True (self.bindirs, self.incdirs, self.libdirs) = vs def __str__(self): return str((self.bindirs, self.incdirs, self.libdirs)) def __repr__(self): return repr((self.bindirs, self.incdirs, self.libdirs)) @conf def detect_ifort(self): return self.setup_ifort(self.get_ifort_versions(False)) @conf def get_ifort_versions(self, eval_and_save=True): """ :return: platforms to compiler configurations :rtype: dict """ dct = {} self.gather_ifort_versions(dct) return dct def _get_prog_names(self, compiler): if compiler=='intel': compiler_name = 'ifort' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 'LIB' return compiler_name, linker_name, lib_name @conf def find_ifort_win32(conf): # the autodetection is supposed to be performed before entering in this method v = conf.env path = v.PATH compiler = v.MSVC_COMPILER version = v.MSVC_VERSION compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) v.IFORT_MANIFEST = (compiler == 'intel' and version >= 11) # compiler fc = conf.find_program(compiler_name, var='FC', path_list=path) # before setting anything, check if the compiler is really intel fortran env = dict(conf.environ) if path: env.update(PATH = ';'.join(path)) if not conf.cmd_and_log(fc + ['/nologo', '/help'], env=env): conf.fatal('not intel fortran compiler could not be identified') v.FC_NAME = 'IFORT' if not v.LINK_FC: conf.find_program(linker_name, var='LINK_FC', path_list=path, mandatory=True) if not v.AR: conf.find_program(lib_name, path_list=path, var='AR', mandatory=True) v.ARFLAGS = ['/nologo'] # manifest tool. Not required for VS 2003 and below. 
Must have for VS 2005 and later if v.IFORT_MANIFEST: conf.find_program('MT', path_list=path, var='MT') v.MTFLAGS = ['/nologo'] try: conf.load('winres') except Errors.WafError: Logs.warn('Resource compiler not found. Compiling resource file is disabled') ####################################################################################################### ##### conf above, build below @after_method('apply_link') @feature('fc') def apply_flags_ifort(self): """ Adds additional flags implied by msvc, such as subsystems and pdb files:: def build(bld): bld.stlib(source='main.c', target='bar', subsystem='gruik') """ if not self.env.IFORT_WIN32 or not getattr(self, 'link_task', None): return is_static = isinstance(self.link_task, ccroot.stlink_task) subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = is_static and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if not is_static: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] == 'debug': pdbnode = self.link_task.outputs[0].change_ext('.pdb') self.link_task.outputs.append(pdbnode) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files(install_to=self.install_task.install_to, install_from=pdbnode) break @feature('fcprogram', 'fcshlib', 'fcprogram_test') @after_method('apply_link') def apply_manifest_ifort(self): """ Enables manifest embedding in Fortran DLLs when using ifort on Windows See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx """ if self.env.IFORT_WIN32 and getattr(self, 'link_task', None): # it seems ifort.exe cannot be called for linking self.link_task.env.FC = self.env.LINK_FC if self.env.IFORT_WIN32 and self.env.IFORT_MANIFEST and getattr(self, 'link_task', None): out_node = self.link_task.outputs[0] man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.env.DO_MANIFEST = True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/intltool.py0000660000000000000000000001520000000000000022721 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Support for translation tools such as msgfmt and intltool Usage:: def configure(conf): conf.load('gnu_dirs intltool') def build(bld): # process the .po files into .gmo files, and install them in LOCALEDIR bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") # process an input file, substituting the translations from the po dir bld( features = "intltool_in", podir = "../po", style = "desktop", flags = ["-u"], source = 'kupfer.desktop.in', install_path = "${DATADIR}/applications", ) Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory. 
""" from __future__ import with_statement import os, re from waflib import Context, Task, Utils, Logs import waflib.Tools.ccroot from waflib.TaskGen import feature, before_method, taskgen_method from waflib.Logs import error from waflib.Configure import conf _style_flags = { 'ba': '-b', 'desktop': '-d', 'keys': '-k', 'quoted': '--quoted-style', 'quotedxml': '--quotedxml-style', 'rfc822deb': '-r', 'schemas': '-s', 'xml': '-x', } @taskgen_method def ensure_localedir(self): """ Expands LOCALEDIR from DATAROOTDIR/locale if possible, or falls back to PREFIX/share/locale """ # use the tool gnu_dirs to provide options to define this if not self.env.LOCALEDIR: if self.env.DATAROOTDIR: self.env.LOCALEDIR = os.path.join(self.env.DATAROOTDIR, 'locale') else: self.env.LOCALEDIR = os.path.join(self.env.PREFIX, 'share', 'locale') @before_method('process_source') @feature('intltool_in') def apply_intltool_in_f(self): """ Creates tasks to translate files by intltool-merge:: def build(bld): bld( features = "intltool_in", podir = "../po", style = "desktop", flags = ["-u"], source = 'kupfer.desktop.in', install_path = "${DATADIR}/applications", ) :param podir: location of the .po files :type podir: string :param source: source files to process :type source: list of string :param style: the intltool-merge mode of operation, can be one of the following values: ``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``. See the ``intltool-merge`` man page for more information about supported modes of operation. :type style: string :param flags: compilation flags ("-quc" by default) :type flags: list of string :param install_path: installation path :type install_path: string """ try: self.meths.remove('process_source') except ValueError: pass self.ensure_localedir() podir = getattr(self, 'podir', '.') podirnode = self.path.find_dir(podir) if not podirnode: error("could not find the podir %r" % podir) return cache = getattr(self, 'intlcache', '.intlcache') self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)] self.env.INTLPODIR = podirnode.bldpath() self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT)) if '-c' in self.env.INTLFLAGS: self.bld.fatal('Redundant -c flag in intltool task %r' % self) style = getattr(self, 'style', None) if style: try: style_flag = _style_flags[style] except KeyError: self.bld.fatal('intltool_in style "%s" is not valid' % style) self.env.append_unique('INTLFLAGS', [style_flag]) for i in self.to_list(self.source): node = self.path.find_resource(i) task = self.create_task('intltool', node, node.change_ext('')) inst = getattr(self, 'install_path', None) if inst: self.add_install_files(install_to=inst, install_from=task.outputs) @feature('intltool_po') def apply_intltool_po(self): """ Creates tasks to process po files:: def build(bld): bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") The relevant task generator arguments are: :param podir: directory of the .po files :type podir: string :param appname: name of the application :type appname: string :param install_path: installation directory :type install_path: string The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process. 
""" try: self.meths.remove('process_source') except ValueError: pass self.ensure_localedir() appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name')) podir = getattr(self, 'podir', '.') inst = getattr(self, 'install_path', '${LOCALEDIR}') linguas = self.path.find_node(os.path.join(podir, 'LINGUAS')) if linguas: # scan LINGUAS file for locales to process with open(linguas.abspath()) as f: langs = [] for line in f.readlines(): # ignore lines containing comments if not line.startswith('#'): langs += line.split() re_linguas = re.compile('[-a-zA-Z_@.]+') for lang in langs: # Make sure that we only process lines which contain locales if re_linguas.match(lang): node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po')) task = self.create_task('po', node, node.change_ext('.mo')) if inst: filename = task.outputs[0].name (langname, ext) = os.path.splitext(filename) inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo' self.add_install_as(install_to=inst_file, install_from=task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644)) else: Logs.pprint('RED', "Error no LINGUAS file found in po directory") class po(Task.Task): """ Compiles .po files into .gmo files """ run_str = '${MSGFMT} -o ${TGT} ${SRC}' color = 'BLUE' class intltool(Task.Task): """ Calls intltool-merge to update translation files """ run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}' color = 'BLUE' @conf def find_msgfmt(conf): """ Detects msgfmt and sets the ``MSGFMT`` variable """ conf.find_program('msgfmt', var='MSGFMT') @conf def find_intltool_merge(conf): """ Detects intltool-merge """ if not conf.env.PERL: conf.find_program('perl', var='PERL') conf.env.INTLCACHE_ST = '--cache=%s' conf.env.INTLFLAGS_DEFAULT = ['-q', '-u'] conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL') def configure(conf): """ Detects the program *msgfmt* and set *conf.env.MSGFMT*. Detects the program *intltool-merge* and set *conf.env.INTLTOOL*. It is possible to set INTLTOOL in the environment, but it must not have spaces in it:: $ INTLTOOL="/path/to/the program/intltool" waf configure If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*. """ conf.find_msgfmt() conf.find_intltool_merge() if conf.env.CC or conf.env.CXX: conf.check(header_name='locale.h') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/irixcc.py0000660000000000000000000000222300000000000022337 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # imported from samba """ Compiler definition for irix/MIPSpro cc compiler """ from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_irixcc(conf): v = conf.env cc = conf.find_program('cc', var='CC') try: conf.cmd_and_log(cc + ['-version']) except Errors.WafError: conf.fatal('%r -version could not be executed' % cc) v.CC_NAME = 'irix' @conf def irixcc_common_flags(conf): v = conf.env v.CC_SRC_F = '' v.CC_TGT_F = ['-c', '-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o'] v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.cprogram_PATTERN = '%s' v.cshlib_PATTERN = 'lib%s.so' v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_irixcc() conf.find_ar() conf.irixcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/javaw.py0000660000000000000000000004076200000000000022200 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Java support Javac is one of the few compilers that behaves very badly: #. it outputs files where it wants to (-d is only for the package root) #. it recompiles files silently behind your back #. it outputs an undefined amount of files (inner classes) Remember that the compilation can be performed using Jython[1] rather than regular Python. Instead of running one of the following commands:: ./waf configure python waf configure You would have to run:: java -jar /path/to/jython.jar waf configure [1] http://www.jython.org/ Usage ===== Load the "java" tool. def configure(conf): conf.load('java') Java tools will be autodetected and eventually, if present, the quite standard JAVA_HOME environment variable will be used. The also standard CLASSPATH variable is used for library searching. In configuration phase checks can be done on the system environment, for example to check if a class is known in the classpath:: conf.check_java_class('java.io.FileOutputStream') or if the system supports JNI applications building:: conf.check_jni_headers() The java tool supports compiling java code, creating jar files and creating javadoc documentation. This can be either done separately or together in a single definition. For example to manage them separately:: bld(features = 'javac', srcdir = 'src', compat = '1.7', use = 'animals', name = 'cats-src', ) bld(features = 'jar', basedir = '.', destfile = '../cats.jar', name = 'cats', use = 'cats-src' ) Or together by defining all the needed attributes:: bld(features = 'javac jar javadoc', srcdir = 'src/', # folder containing the sources to compile outdir = 'src', # folder where to output the classes (in the build directory) compat = '1.6', # java compatibility version number classpath = ['.', '..'], # jar basedir = 'src', # folder containing the classes and other files to package (must match outdir) destfile = 'foo.jar', # do not put the destfile in the folder of the java classes! 
use = 'NNN', jaropts = ['-C', 'default/src/', '.'], # can be used to give files manifest = 'src/Manifest.mf', # Manifest file to include # javadoc javadoc_package = ['com.meow' , 'com.meow.truc.bar', 'com.meow.truc.foo'], javadoc_output = 'javadoc', ) External jar dependencies can be mapped to a standard waf "use" dependency by setting an environment variable with a CLASSPATH prefix in the configuration, for example:: conf.env.CLASSPATH_NNN = ['aaaa.jar', 'bbbb.jar'] and then NNN can be freely used in rules as:: use = 'NNN', In the java tool the dependencies via use are not transitive by default, as this necessity depends on the code. To enable recursive dependency scanning use on a specific rule: recurse_use = True Or build-wise by setting RECURSE_JAVA: bld.env.RECURSE_JAVA = True Unit tests can be integrated in the waf unit test environment using the javatest extra. """ import os, shutil from waflib import Task, Utils, Errors, Node from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method, taskgen_method from waflib.Tools import ccroot ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS']) SOURCE_RE = '**/*.java' JAR_RE = '**/*' class_check_source = ''' public class Test { public static void main(String[] argv) { Class lib; if (argv.length < 1) { System.err.println("Missing argument"); System.exit(77); } try { lib = Class.forName(argv[0]); } catch (ClassNotFoundException e) { System.err.println("ClassNotFoundException"); System.exit(1); } lib = null; System.exit(0); } } ''' @feature('javac') @before_method('process_source') def apply_java(self): """ Create a javac task for compiling *.java files*. There can be only one javac task by task generator. """ Utils.def_attrs(self, jarname='', classpath='', sourcepath='.', srcdir='.', jar_mf_attributes={}, jar_mf_classpath=[]) outdir = getattr(self, 'outdir', None) if outdir: if not isinstance(outdir, Node.Node): outdir = self.path.get_bld().make_node(self.outdir) else: outdir = self.path.get_bld() outdir.mkdir() self.outdir = outdir self.env.OUTDIR = outdir.abspath() self.javac_task = tsk = self.create_task('javac') tmp = [] srcdir = getattr(self, 'srcdir', '') if isinstance(srcdir, Node.Node): srcdir = [srcdir] for x in Utils.to_list(srcdir): if isinstance(x, Node.Node): y = x else: y = self.path.find_dir(x) if not y: self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) tmp.append(y) tsk.srcdir = tmp if getattr(self, 'compat', None): tsk.env.append_value('JAVACFLAGS', ['-source', str(self.compat)]) if hasattr(self, 'sourcepath'): fold = [isinstance(x, Node.Node) and x or self.path.find_dir(x) for x in self.to_list(self.sourcepath)] names = os.pathsep.join([x.srcpath() for x in fold]) else: names = [x.srcpath() for x in tsk.srcdir] if names: tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names]) @taskgen_method def java_use_rec(self, name, **kw): """ Processes recursively the *use* attribute for each referred java compilation """ if name in self.tmp_use_seen: return self.tmp_use_seen.append(name) try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) return else: y.post() # Add generated JAR name for CLASSPATH. 
Task ordering (set_run_after) # is already guaranteed by ordering done between the single tasks if hasattr(y, 'jar_task'): self.use_lst.append(y.jar_task.outputs[0].abspath()) else: if hasattr(y,'outdir'): self.use_lst.append(y.outdir.abspath()) else: self.use_lst.append(y.path.get_bld().abspath()) for x in self.to_list(getattr(y, 'use', [])): self.java_use_rec(x) @feature('javac') @before_method('propagate_uselib_vars') @after_method('apply_java') def use_javac_files(self): """ Processes the *use* attribute referring to other java compilations """ self.use_lst = [] self.tmp_use_seen = [] self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: tg = get(x) except Errors.WafError: self.uselib.append(x) else: tg.post() if hasattr(tg, 'jar_task'): self.use_lst.append(tg.jar_task.outputs[0].abspath()) self.javac_task.set_run_after(tg.jar_task) self.javac_task.dep_nodes.extend(tg.jar_task.outputs) else: if hasattr(tg, 'outdir'): base_node = tg.outdir else: base_node = tg.path.get_bld() self.use_lst.append(base_node.abspath()) self.javac_task.dep_nodes.extend([dx for dx in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) for tsk in tg.tasks: self.javac_task.set_run_after(tsk) # If recurse use scan is enabled recursively add use attribute for each used one if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA: self.java_use_rec(x) self.env.append_value('CLASSPATH', self.use_lst) @feature('javac') @after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files') def set_classpath(self): """ Sets the CLASSPATH value on the *javac* task previously created. """ if getattr(self, 'classpath', None): self.env.append_unique('CLASSPATH', getattr(self, 'classpath', [])) for x in self.tasks: x.env.CLASSPATH = os.pathsep.join(self.env.CLASSPATH) + os.pathsep @feature('jar') @after_method('apply_java', 'use_javac_files') @before_method('process_source') def jar_files(self): """ Creates a jar task (one maximum per task generator) """ destfile = getattr(self, 'destfile', 'test.jar') jaropts = getattr(self, 'jaropts', []) manifest = getattr(self, 'manifest', None) basedir = getattr(self, 'basedir', None) if basedir: if not isinstance(self.basedir, Node.Node): basedir = self.path.get_bld().make_node(basedir) else: basedir = self.path.get_bld() if not basedir: self.bld.fatal('Could not find the basedir %r for %r' % (self.basedir, self)) self.jar_task = tsk = self.create_task('jar_create') if manifest: jarcreate = getattr(self, 'jarcreate', 'cfm') if not isinstance(manifest,Node.Node): node = self.path.find_resource(manifest) else: node = manifest if not node: self.bld.fatal('invalid manifest file %r for %r' % (manifest, self)) tsk.dep_nodes.append(node) jaropts.insert(0, node.abspath()) else: jarcreate = getattr(self, 'jarcreate', 'cf') if not isinstance(destfile, Node.Node): destfile = self.path.find_or_declare(destfile) if not destfile: self.bld.fatal('invalid destfile %r for %r' % (destfile, self)) tsk.set_outputs(destfile) tsk.basedir = basedir jaropts.append('-C') jaropts.append(basedir.bldpath()) jaropts.append('.') tsk.env.JAROPTS = jaropts tsk.env.JARCREATE = jarcreate if getattr(self, 'javac_task', None): tsk.set_run_after(self.javac_task) @feature('jar') @after_method('jar_files') def use_jar_files(self): """ Processes the *use* attribute to set the build order on the tasks created by another task generator. 
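For illustration only (hypothetical target names), a jar can be ordered after another task generator by naming it in *use*::

	bld(features='javac jar', srcdir='src', outdir='src', basedir='src', destfile='core.jar', name='core')
	bld(features='jar', basedir='resources', destfile='bundle.jar', use='core')

Here the tasks producing *bundle.jar* only run once the tasks of *core* have completed.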
""" self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: y = get(x) except Errors.WafError: self.uselib.append(x) else: y.post() self.jar_task.run_after.update(y.tasks) class JTask(Task.Task): """ Base class for java and jar tasks; provides functionality to run long commands """ def split_argfile(self, cmd): inline = [cmd[0]] infile = [] for x in cmd[1:]: # jar and javac do not want -J flags in @file if x.startswith('-J'): inline.append(x) else: infile.append(self.quote_flag(x)) return (inline, infile) class jar_create(JTask): """ Creates a jar file """ color = 'GREEN' run_str = '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}' def runnable_status(self): """ Wait for dependent tasks to be executed, then read the files to update the list of inputs. """ for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: try: self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])] except Exception: raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self)) return super(jar_create, self).runnable_status() class javac(JTask): """ Compiles java files """ color = 'BLUE' run_str = '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}' vars = ['CLASSPATH', 'JAVACFLAGS', 'JAVAC', 'OUTDIR'] """ The javac task will be executed again if the variables CLASSPATH, JAVACFLAGS, JAVAC or OUTDIR change. """ def uid(self): """Identify java tasks by input&output folder""" lst = [self.__class__.__name__, self.generator.outdir.abspath()] for x in self.srcdir: lst.append(x.abspath()) return Utils.h_list(lst) def runnable_status(self): """ Waits for dependent tasks to be complete, then read the file system to find the input nodes. 
""" for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: self.inputs = [] for x in self.srcdir: if x.exists(): self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False, quiet=True)) return super(javac, self).runnable_status() def post_run(self): """ List class files created """ for node in self.generator.outdir.ant_glob('**/*.class', quiet=True): self.generator.bld.node_sigs[node] = self.uid() self.generator.bld.task_sigs[self.uid()] = self.cache_sig @feature('javadoc') @after_method('process_rule') def create_javadoc(self): """ Creates a javadoc task (feature 'javadoc') """ tsk = self.create_task('javadoc') tsk.classpath = getattr(self, 'classpath', []) self.javadoc_package = Utils.to_list(self.javadoc_package) if not isinstance(self.javadoc_output, Node.Node): self.javadoc_output = self.bld.path.find_or_declare(self.javadoc_output) class javadoc(Task.Task): """ Builds java documentation """ color = 'BLUE' def __str__(self): return '%s: %s -> %s\n' % (self.__class__.__name__, self.generator.srcdir, self.generator.javadoc_output) def run(self): env = self.env bld = self.generator.bld wd = bld.bldnode #add src node + bld node (for generated java code) srcpath = self.generator.path.abspath() + os.sep + self.generator.srcdir srcpath += os.pathsep srcpath += self.generator.path.get_bld().abspath() + os.sep + self.generator.srcdir classpath = env.CLASSPATH classpath += os.pathsep classpath += os.pathsep.join(self.classpath) classpath = "".join(classpath) self.last_cmd = lst = [] lst.extend(Utils.to_list(env.JAVADOC)) lst.extend(['-d', self.generator.javadoc_output.abspath()]) lst.extend(['-sourcepath', srcpath]) lst.extend(['-classpath', classpath]) lst.extend(['-subpackages']) lst.extend(self.generator.javadoc_package) lst = [x for x in lst if x] self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) def post_run(self): nodes = self.generator.javadoc_output.ant_glob('**', quiet=True) for node in nodes: self.generator.bld.node_sigs[node] = self.uid() self.generator.bld.task_sigs[self.uid()] = self.cache_sig def configure(self): """ Detects the javac, java and jar programs """ # If JAVA_PATH is set, we prepend it to the path list java_path = self.environ['PATH'].split(os.pathsep) v = self.env if 'JAVA_HOME' in self.environ: java_path = [os.path.join(self.environ['JAVA_HOME'], 'bin')] + java_path self.env.JAVA_HOME = [self.environ['JAVA_HOME']] for x in 'javac java jar javadoc'.split(): self.find_program(x, var=x.upper(), path_list=java_path, mandatory=(x not in ('javadoc'))) if 'CLASSPATH' in self.environ: v.CLASSPATH = self.environ['CLASSPATH'] if not v.JAR: self.fatal('jar is required for making java packages') if not v.JAVAC: self.fatal('javac is required for compiling java classes') v.JARCREATE = 'cf' # can use cvf v.JAVACFLAGS = [] @conf def check_java_class(self, classname, with_classpath=None): """ Checks if the specified java class exists :param classname: class to check, like java.util.HashMap :type classname: string :param with_classpath: additional classpath to give :type with_classpath: string """ javatestdir = '.waf-javatest' classpath = javatestdir if self.env.CLASSPATH: classpath += os.pathsep + self.env.CLASSPATH if isinstance(with_classpath, str): classpath += os.pathsep + with_classpath shutil.rmtree(javatestdir, True) os.mkdir(javatestdir) Utils.writef(os.path.join(javatestdir, 'Test.java'), class_check_source) # Compile the source self.exec_command(self.env.JAVAC + [os.path.join(javatestdir, 'Test.java')], shell=False) # Try to 
run the app cmd = self.env.JAVA + ['-cp', classpath, 'Test', classname] self.to_log("%s\n" % str(cmd)) found = self.exec_command(cmd, shell=False) self.msg('Checking for java class %s' % classname, not found) shutil.rmtree(javatestdir, True) return found @conf def check_jni_headers(conf): """ Checks for jni headers and libraries. On success the conf.env variables xxx_JAVA are added for use in C/C++ targets:: def options(opt): opt.load('compiler_c') def configure(conf): conf.load('compiler_c java') conf.check_jni_headers() def build(bld): bld.shlib(source='a.c', target='app', use='JAVA') """ if not conf.env.CC_NAME and not conf.env.CXX_NAME: conf.fatal('load a compiler first (gcc, g++, ..)') if not conf.env.JAVA_HOME: conf.fatal('set JAVA_HOME in the system environment') # jni requires the jvm javaHome = conf.env.JAVA_HOME[0] dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/include') if dir is None: dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/../Headers') # think different?! if dir is None: conf.fatal('JAVA_HOME does not seem to be set properly') f = dir.ant_glob('**/(jni|jni_md).h') incDirs = [x.parent.abspath() for x in f] dir = conf.root.find_dir(conf.env.JAVA_HOME[0]) f = dir.ant_glob('**/*jvm.(so|dll|dylib)') libDirs = [x.parent.abspath() for x in f] or [javaHome] # On windows, we need both the .dll and .lib to link. On my JDK, they are # in different directories... f = dir.ant_glob('**/*jvm.(lib)') if f: libDirs = [[x, y.parent.abspath()] for x in libDirs for y in f] if conf.env.DEST_OS == 'freebsd': conf.env.append_unique('LINKFLAGS_JAVA', '-pthread') for d in libDirs: try: conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm', libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA') except Exception: pass else: break else: conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/ldc2.py0000660000000000000000000000224100000000000021702 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Alex Rønne Petersen, 2012 (alexrp/Zor) from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_ldc2(conf): """ Finds the program *ldc2* and set the variable *D* """ conf.find_program(['ldc2'], var='D') out = conf.cmd_and_log(conf.env.D + ['-version']) if out.find("based on DMD v2.") == -1: conf.fatal("detected compiler is not ldc2") @conf def common_flags_ldc2(conf): """ Sets the D flags required by *ldc2* """ v = conf.env v.D_SRC_F = ['-c'] v.D_TGT_F = '-of%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-of%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' v.LINKFLAGS_dshlib = ['-L-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = ['-H', '-Hf'] v.D_HDR_F = '%s' v.LINKFLAGS = [] v.DFLAGS_dshlib = ['-relocation-model=pic'] def configure(conf): """ Configuration for *ldc2* """ conf.find_ldc2() conf.load('ar') conf.load('d') conf.common_flags_ldc2() conf.d_platform_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/lua.py0000660000000000000000000000152300000000000021641 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Sebastian Schlingmann, 2008 # Thomas Nagy, 2008-2018 (ita) """ Lua support. 
Compile *.lua* files into *.luac*:: def configure(conf): conf.load('lua') conf.env.LUADIR = '/usr/local/share/myapp/scripts/' def build(bld): bld(source='foo.lua') """ from waflib.TaskGen import extension from waflib import Task @extension('.lua') def add_lua(self, node): tsk = self.create_task('luac', node, node.change_ext('.luac')) inst_to = getattr(self, 'install_path', self.env.LUADIR and '${LUADIR}' or None) if inst_to: self.add_install_files(install_to=inst_to, install_from=tsk.outputs) return tsk class luac(Task.Task): run_str = '${LUAC} -s -o ${TGT} ${SRC}' color = 'PINK' def configure(conf): """ Detect the luac compiler and set *conf.env.LUAC* """ conf.find_program('luac', var='LUAC') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/md5_tstamp.py0000660000000000000000000000170300000000000023135 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Re-calculate md5 hashes of files only when the file time have changed:: def options(opt): opt.load('md5_tstamp') The hashes can also reflect either the file contents (STRONGEST=True) or the file time and file size. The performance benefits of this module are usually insignificant. """ import os, stat from waflib import Utils, Build, Node STRONGEST = True Build.SAVED_ATTRS.append('hashes_md5_tstamp') def h_file(self): filename = self.abspath() st = os.stat(filename) cache = self.ctx.hashes_md5_tstamp if filename in cache and cache[filename][0] == st.st_mtime: return cache[filename][1] if STRONGEST: ret = Utils.h_file(filename) else: if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('Not a file') ret = Utils.md5(str((st.st_mtime, st.st_size)).encode()).digest() cache[filename] = (st.st_mtime, ret) return ret h_file.__doc__ = Node.Node.h_file.__doc__ Node.Node.h_file = h_file ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/Tools/msvc.py0000660000000000000000000010444000000000000022032 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 (dv) # Tamas Pal, 2007 (folti) # Nicolas Mercier, 2009 # Matt Clarkson, 2012 """ Microsoft Visual C++/Intel C++ compiler support If you get detection problems, first try any of the following:: chcp 65001 set PYTHONIOENCODING=... set PYTHONLEGACYWINDOWSSTDIO=1 Usage:: $ waf configure --msvc_version="msvc 10.0,msvc 9.0" --msvc_target="x64" or:: def configure(conf): conf.env.MSVC_VERSIONS = ['msvc 10.0', 'msvc 9.0', 'msvc 8.0', 'msvc 7.1', 'msvc 7.0', 'msvc 6.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0'] conf.env.MSVC_TARGETS = ['x64'] conf.load('msvc') or:: def configure(conf): conf.load('msvc', funs='no_autodetect') conf.check_lib_msvc('gdi32') conf.check_libs_msvc('kernel32 user32') def build(bld): tg = bld.program(source='main.c', target='app', use='KERNEL32 USER32 GDI32') Platforms and targets will be tested in the order they appear; the first good configuration will be used. To force testing all the configurations that are not used, use the ``--no-msvc-lazy`` option or set ``conf.env.MSVC_LAZY_AUTODETECT=False``. 
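For example, to force eager evaluation of all detected configurations from a wscript (illustrative snippet)::

	def configure(conf):
		conf.env.MSVC_LAZY_AUTODETECT = False
		conf.load('msvc')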
Supported platforms: ia64, x64, x86, x86_amd64, x86_ia64, x86_arm, amd64_x86, amd64_arm Compilers supported: * msvc => Visual Studio, versions 6.0 (VC 98, VC .NET 2002) to 15 (Visual Studio 2017) * wsdk => Windows SDK, versions 6.0, 6.1, 7.0, 7.1, 8.0 * icl => Intel compiler, versions 9, 10, 11, 13 * winphone => Visual Studio to target Windows Phone 8 native (version 8.0 for now) * Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i) * PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i) To use WAF in a VS2008 Make file project (see http://code.google.com/p/waf/issues/detail?id=894) You may consider to set the environment variable "VS_UNICODE_OUTPUT" to nothing before calling waf. So in your project settings use something like 'cmd.exe /C "set VS_UNICODE_OUTPUT=& set PYTHONUNBUFFERED=true & waf build"'. cmd.exe /C "chcp 1252 & set PYTHONUNBUFFERED=true && set && waf configure" Setting PYTHONUNBUFFERED gives the unbuffered output. """ import os, sys, re, traceback from waflib import Utils, Logs, Options, Errors from waflib.TaskGen import after_method, feature from waflib.Configure import conf from waflib.Tools import ccroot, c, cxx, ar g_msvc_systemlibs = ''' aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32 osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32 shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32 traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp '''.split() """importlibs provided by MSVC/Platform SDK. 
Do NOT search them""" all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64'), ('x86_arm', 'arm'), ('x86_arm64', 'arm64'), ('amd64_x86', 'x86'), ('amd64_arm', 'arm'), ('amd64_arm64', 'arm64') ] """List of msvc platforms""" all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ] """List of wince platforms""" all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] """List of icl platforms""" def options(opt): default_ver = '' vsver = os.getenv('VSCMD_VER') if vsver: m = re.match(r'(^\d+\.\d+).*', vsver) if m: default_ver = 'msvc %s' % m.group(1) opt.add_option('--msvc_version', type='string', help = 'msvc version, eg: "msvc 10.0,msvc 9.0"', default=default_ver) opt.add_option('--msvc_targets', type='string', help = 'msvc targets, eg: "x64,arm"', default='') opt.add_option('--no-msvc-lazy', action='store_false', help = 'lazily check msvc target environments', default=True, dest='msvc_lazy') @conf def setup_msvc(conf, versiondict): """ Checks installed compilers and targets and returns the first combination from the user's options, env, or the global supported lists that checks. :param versiondict: dict(platform -> dict(architecture -> configuration)) :type versiondict: dict(string -> dict(string -> target_compiler) :return: the compiler, revision, path, include dirs, library paths and target architecture :rtype: tuple of strings """ platforms = getattr(Options.options, 'msvc_targets', '').split(',') if platforms == ['']: platforms=Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms] desired_versions = getattr(Options.options, 'msvc_version', '').split(',') if desired_versions == ['']: desired_versions = conf.env.MSVC_VERSIONS or list(reversed(sorted(versiondict.keys()))) # Override lazy detection by evaluating after the fact. lazy_detect = getattr(Options.options, 'msvc_lazy', True) if conf.env.MSVC_LAZY_AUTODETECT is False: lazy_detect = False if not lazy_detect: for val in versiondict.values(): for arch in list(val.keys()): cfg = val[arch] cfg.evaluate() if not cfg.is_valid: del val[arch] conf.env.MSVC_INSTALLED_VERSIONS = versiondict for version in desired_versions: Logs.debug('msvc: detecting %r - %r', version, desired_versions) try: targets = versiondict[version] except KeyError: continue seen = set() for arch in platforms: if arch in seen: continue else: seen.add(arch) try: cfg = targets[arch] except KeyError: continue cfg.evaluate() if cfg.is_valid: compiler,revision = version.rsplit(' ', 1) return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu conf.fatal('msvc: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) @conf def get_msvc_version(conf, compiler, version, target, vcvars): """ Checks that an installed compiler actually runs and uses vcvars to obtain the environment needed by the compiler. 
:param compiler: compiler type, for looking up the executable name :param version: compiler version, for debugging only :param target: target architecture :param vcvars: batch file to run to check the environment :return: the location of the compiler executable, the location of include dirs, and the library paths :rtype: tuple of strings """ Logs.debug('msvc: get_msvc_version: %r %r %r', compiler, version, target) try: conf.msvc_cnt += 1 except AttributeError: conf.msvc_cnt = 1 batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) batfile.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%%;%%LIBPATH%% """ % (vcvars,target)) sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()]) lines = sout.splitlines() if not lines[0]: lines.pop(0) MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None for line in lines: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)') # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = dict(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) cxx = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. if 'CL' in env: del(env['CL']) try: conf.cmd_and_log(cxx + ['/help'], env=env) except UnicodeError: st = traceback.format_exc() if conf.logger: conf.logger.error(st) conf.fatal('msvc: Unicode error - check the code page?') except Exception as e: Logs.debug('msvc: get_msvc_version: %r %r %r -> failure %s', compiler, version, target, str(e)) conf.fatal('msvc: cannot run the compiler in get_msvc_version (run with -v to display errors)') else: Logs.debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target) finally: conf.env[compiler_name] = '' return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) def gather_wince_supported_platforms(): """ Checks SmartPhones SDKs :param versions: list to modify :type versions: list """ supported_wince_platforms = [] try: ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs') except OSError: try: ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs') except OSError: ce_sdk = '' if not ce_sdk: return supported_wince_platforms index = 0 while 1: try: sdk_device = Utils.winreg.EnumKey(ce_sdk, index) sdk = Utils.winreg.OpenKey(ce_sdk, sdk_device) except OSError: break index += 1 try: path,type = Utils.winreg.QueryValueEx(sdk, 'SDKRootDir') except OSError: try: path,type = Utils.winreg.QueryValueEx(sdk,'SDKInformation') except OSError: continue path,xml = os.path.split(path) path = str(path) path,device = os.path.split(path) if not device: path,device = os.path.split(path) platforms = [] for arch,compiler in all_wince_platforms: if os.path.isdir(os.path.join(path, device, 'Lib', arch)): platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch))) if platforms: 
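# each collected entry is a tuple: (device, [(arch, compiler, include dir, lib dir), ...])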
supported_wince_platforms.append((device, platforms)) return supported_wince_platforms def gather_msvc_detected_versions(): #Detected MSVC versions! version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$') detected_versions = [] for vcver,vcvar in (('VCExpress','Exp'), ('VisualStudio','')): prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\' + vcver try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix) except OSError: prefix = 'SOFTWARE\\Microsoft\\' + vcver try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix) except OSError: continue index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 match = version_pattern.match(version) if match: versionnumber = float(match.group(1)) else: continue detected_versions.append((versionnumber, version+vcvar, prefix+'\\'+version)) def fun(tup): return tup[0] detected_versions.sort(key = fun) return detected_versions class target_compiler(object): """ Wrap a compiler configuration; call evaluate() to determine whether the configuration is usable. """ def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): """ :param ctx: configuration context to use to eventually get the version environment :param compiler: compiler name :param cpu: target cpu :param version: compiler version number :param bat_target: ? :param bat: path to the batch file to run """ self.conf = ctx self.name = None self.is_valid = False self.is_done = False self.compiler = compiler self.cpu = cpu self.version = version self.bat_target = bat_target self.bat = bat self.callback = callback def evaluate(self): if self.is_done: return self.is_done = True try: vs = self.conf.get_msvc_version(self.compiler, self.version, self.bat_target, self.bat) except Errors.ConfigurationError: self.is_valid = False return if self.callback: vs = self.callback(self, vs) self.is_valid = True (self.bindirs, self.incdirs, self.libdirs) = vs def __str__(self): return str((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) def __repr__(self): return repr((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) @conf def gather_wsdk_versions(conf, versions): """ Use winreg to add the msvc versions to the input list :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^v..?.?\...?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue try: msvc_version = Utils.winreg.OpenKey(all_versions, version) path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder') except OSError: continue if path and os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')): targets = {} for target,arch in all_msvc_platforms: targets[target] = target_compiler(conf, 'wsdk', arch, version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')) versions['wsdk ' + version[1:]] = targets @conf def gather_msvc_targets(conf, versions, version, vc_path): #Looking for normal MSVC compilers! 
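# Batch files are probed in this order: the VS2017+ layout (VC/Auxiliary/Build/vcvarsall.bat),
# the classic vcvarsall.bat, then Common7/Tools/vsvars32.bat and Bin/vcvars32.bat
# (the last two register an x86 target only).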
targets = {} if os.path.isfile(os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')) elif os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'vcvarsall.bat')) elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')): targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, 'x86', os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')) elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')): targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, '', os.path.join(vc_path, 'Bin', 'vcvars32.bat')) if targets: versions['msvc %s' % version] = targets @conf def gather_wince_targets(conf, versions, version, vc_path, vsvars, supported_platforms): #Looking for Win CE compilers! for device,platforms in supported_platforms: targets = {} for platform,compiler,include,lib in platforms: winCEpath = os.path.join(vc_path, 'ce') if not os.path.isdir(winCEpath): continue if os.path.isdir(os.path.join(winCEpath, 'lib', platform)): bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] incdirs = [os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include'), include] libdirs = [os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform), lib] def combine_common(obj, compiler_env): # TODO this is likely broken, remove in waf 2.1 (common_bindirs,_1,_2) = compiler_env return (bindirs + common_bindirs, incdirs, libdirs) targets[platform] = target_compiler(conf, 'msvc', platform, version, 'x86', vsvars, combine_common) if targets: versions[device + ' ' + version] = targets @conf def gather_winphone_targets(conf, versions, version, vc_path, vsvars): #Looking for WinPhone compilers targets = {} for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'winphone', realtarget, version, target, vsvars) if targets: versions['winphone ' + version] = targets @conf def gather_vswhere_versions(conf, versions): try: import json except ImportError: Logs.error('Visual Studio 2017 detection requires Python 2.6') return prg_path = os.environ.get('ProgramFiles(x86)', os.environ.get('ProgramFiles', 'C:\\Program Files (x86)')) vswhere = os.path.join(prg_path, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe') args = [vswhere, '-products', '*', '-legacy', '-format', 'json'] try: txt = conf.cmd_and_log(args) except Errors.WafError as e: Logs.debug('msvc: vswhere.exe failed %s', e) return if sys.version_info[0] < 3: txt = txt.decode(Utils.console_encoding()) arr = json.loads(txt) arr.sort(key=lambda x: x['installationVersion']) for entry in arr: ver = entry['installationVersion'] ver = str('.'.join(ver.split('.')[:2])) path = str(os.path.abspath(entry['installationPath'])) if os.path.exists(path) and ('msvc %s' % ver) not in versions: conf.gather_msvc_targets(versions, ver, path) @conf def gather_msvc_versions(conf, versions): vc_paths = [] for (v,version,reg) in gather_msvc_detected_versions(): try: try: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC") except OSError: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + 
"\\Setup\\Microsoft Visual C++") path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir') except OSError: try: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Wow6432node\\Microsoft\\VisualStudio\\SxS\\VS7") path,type = Utils.winreg.QueryValueEx(msvc_version, version) except OSError: continue else: vc_paths.append((version, os.path.abspath(str(path)))) continue else: vc_paths.append((version, os.path.abspath(str(path)))) wince_supported_platforms = gather_wince_supported_platforms() for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) vsvars = os.path.join(vs_path, 'Common7', 'Tools', 'vsvars32.bat') if wince_supported_platforms and os.path.isfile(vsvars): conf.gather_wince_targets(versions, version, vc_path, vsvars, wince_supported_platforms) # WP80 works with 11.0Exp and 11.0, both of which resolve to the same vc_path. # Stop after one is found. for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) vsvars = os.path.join(vs_path, 'VC', 'WPSDK', 'WP80', 'vcvarsphoneall.bat') if os.path.isfile(vsvars): conf.gather_winphone_targets(versions, '8.0', vc_path, vsvars) break for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) conf.gather_msvc_targets(versions, version, vc_path) @conf def gather_icl_versions(conf, versions): """ Checks ICL compilers :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^...?.?\....?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_icl_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version) path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) for target,arch in all_icl_platforms: try: icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: continue else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) major = version[0:2] versions['intel ' + major] = targets @conf def gather_intel_composer_versions(conf, versions): """ Checks ICL compilers that are part of Intel Composer Suites :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^...?.?\...?.?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Suites') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Suites') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_icl_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: 
targetDir=target try: try: defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\'+targetDir) except OSError: if targetDir == 'EM64T_NATIVE': defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\EM64T') else: raise uid,type = Utils.winreg.QueryValueEx(defaults, 'SubKey') Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++') path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) # The intel compilervar_arch.bat is broken when used with Visual Studio Express 2012 # http://software.intel.com/en-us/forums/topic/328487 compilervars_warning_attr = '_compilervars_warning_key' if version[0:2] == '13' and getattr(conf, compilervars_warning_attr, True): setattr(conf, compilervars_warning_attr, False) patch_url = 'http://software.intel.com/en-us/forums/topic/328487' compilervars_arch = os.path.join(path, 'bin', 'compilervars_arch.bat') for vscomntool in ('VS110COMNTOOLS', 'VS100COMNTOOLS'): if vscomntool in os.environ: vs_express_path = os.environ[vscomntool] + r'..\IDE\VSWinExpress.exe' dev_env_path = os.environ[vscomntool] + r'..\IDE\devenv.exe' if (r'if exist "%VS110COMNTOOLS%..\IDE\VSWinExpress.exe"' in Utils.readf(compilervars_arch) and not os.path.exists(vs_express_path) and not os.path.exists(dev_env_path)): Logs.warn(('The Intel compilervar_arch.bat only checks for one Visual Studio SKU ' '(VSWinExpress.exe) but it does not seem to be installed at %r. ' 'The intel command line set up will fail to configure unless the file %r' 'is patched. See: %s') % (vs_express_path, compilervars_arch, patch_url)) major = version[0:2] versions['intel ' + major] = targets @conf def detect_msvc(self): return self.setup_msvc(self.get_msvc_versions()) @conf def get_msvc_versions(self): """ :return: platform to compiler configurations :rtype: dict """ dct = Utils.ordered_iter_dict() self.gather_icl_versions(dct) self.gather_intel_composer_versions(dct) self.gather_wsdk_versions(dct) self.gather_msvc_versions(dct) self.gather_vswhere_versions(dct) Logs.debug('msvc: detected versions %r', list(dct.keys())) return dct @conf def find_lt_names_msvc(self, libname, is_static=False): """ Win32/MSVC specific code to glean out information from libtool la files. this function is not attached to the task_gen class. 
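The lookup is purely filesystem based: each path in ``conf.env.LIBPATH`` is scanned for ``lib<name>.la`` and ``<name>.la`` files.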
Returns a triplet: (library absolute path, library name without extension, whether the library is static) """ lt_names=[ 'lib%s.la' % libname, '%s.la' % libname, ] for path in self.env.LIBPATH: for la in lt_names: laf=os.path.join(path,la) dll=None if os.path.exists(laf): ltdict = Utils.read_la_file(laf) lt_libdir=None if ltdict.get('libdir', ''): lt_libdir = ltdict['libdir'] if not is_static and ltdict.get('library_names', ''): dllnames=ltdict['library_names'].split() dll=dllnames[0].lower() dll=re.sub(r'\.dll$', '', dll) return (lt_libdir, dll, False) elif ltdict.get('old_library', ''): olib=ltdict['old_library'] if os.path.exists(os.path.join(path,olib)): return (path, olib, True) elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)): return (lt_libdir, olib, True) else: return (None, olib, True) else: raise self.errors.WafError('invalid libtool object file: %s' % laf) return (None, None, None) @conf def libname_msvc(self, libname, is_static=False): lib = libname.lower() lib = re.sub(r'\.lib$','',lib) if lib in g_msvc_systemlibs: return lib lib=re.sub('^lib','',lib) if lib == 'm': return None (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static) if lt_path != None and lt_libname != None: if lt_static: # file existence check has been made by find_lt_names return os.path.join(lt_path,lt_libname) if lt_path != None: _libpaths = [lt_path] + self.env.LIBPATH else: _libpaths = self.env.LIBPATH static_libs=[ 'lib%ss.lib' % lib, 'lib%s.lib' % lib, '%ss.lib' % lib, '%s.lib' %lib, ] dynamic_libs=[ 'lib%s.dll.lib' % lib, 'lib%s.dll.a' % lib, '%s.dll.lib' % lib, '%s.dll.a' % lib, 'lib%s_d.lib' % lib, '%s_d.lib' % lib, '%s.lib' %lib, ] libnames=static_libs if not is_static: libnames=dynamic_libs + static_libs for path in _libpaths: for libn in libnames: if os.path.exists(os.path.join(path, libn)): Logs.debug('msvc: lib found: %s', os.path.join(path,libn)) return re.sub(r'\.lib$', '',libn) #if no lib can be found, just return the libname as msvc expects it self.fatal('The library %r could not be found' % libname) return re.sub(r'\.lib$', '', libname) @conf def check_lib_msvc(self, libname, is_static=False, uselib_store=None): """ Ideally we should be able to place the lib in the right env var, either STLIB or LIB, but we don't distinguish static libs from shared libs. 
This is ok since msvc doesn't have any special linker flag to select static libs (no env.STLIB_MARKER) """ libn = self.libname_msvc(libname, is_static) if not uselib_store: uselib_store = libname.upper() if False and is_static: # disabled self.env['STLIB_' + uselib_store] = [libn] else: self.env['LIB_' + uselib_store] = [libn] @conf def check_libs_msvc(self, libnames, is_static=False): for libname in Utils.to_list(libnames): self.check_lib_msvc(libname, is_static) def configure(conf): """ Configuration methods to call for detecting msvc """ conf.autodetect(True) conf.find_msvc() conf.msvc_common_flags() conf.cc_load_tools() conf.cxx_load_tools() conf.cc_add_flags() conf.cxx_add_flags() conf.link_add_flags() conf.visual_studio_add_flags() @conf def no_autodetect(conf): conf.env.NO_MSVC_DETECT = 1 configure(conf) @conf def autodetect(conf, arch=False): v = conf.env if v.NO_MSVC_DETECT: return compiler, version, path, includes, libdirs, cpu = conf.detect_msvc() if arch: v.DEST_CPU = cpu v.PATH = path v.INCLUDES = includes v.LIBPATH = libdirs v.MSVC_COMPILER = compiler try: v.MSVC_VERSION = float(version) except ValueError: v.MSVC_VERSION = float(version[:-3]) def _get_prog_names(conf, compiler): if compiler == 'intel': compiler_name = 'ICL' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 'LIB' return compiler_name, linker_name, lib_name @conf def find_msvc(conf): """Due to path format limitations, limit operation only to native Win32. Yeah it sucks.""" if sys.platform == 'cygwin': conf.fatal('MSVC module does not work under cygwin Python!') # the autodetection is supposed to be performed before entering in this method v = conf.env path = v.PATH compiler = v.MSVC_COMPILER version = v.MSVC_VERSION compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11) # compiler cxx = conf.find_program(compiler_name, var='CXX', path_list=path) # before setting anything, check if the compiler is really msvc env = dict(conf.environ) if path: env.update(PATH = ';'.join(path)) if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env): conf.fatal('the msvc compiler could not be identified') # c/c++ compiler v.CC = v.CXX = cxx v.CC_NAME = v.CXX_NAME = 'msvc' # linker if not v.LINK_CXX: conf.find_program(linker_name, path_list=path, errmsg='%s was not found (linker)' % linker_name, var='LINK_CXX') if not v.LINK_CC: v.LINK_CC = v.LINK_CXX # staticlib linker if not v.AR: stliblink = conf.find_program(lib_name, path_list=path, var='AR') if not stliblink: return v.ARFLAGS = ['/nologo'] # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later if v.MSVC_MANIFEST: conf.find_program('MT', path_list=path, var='MT') v.MTFLAGS = ['/nologo'] try: conf.load('winres') except Errors.ConfigurationError: Logs.warn('Resource compiler not found. 
Compiling resource file is disabled') @conf def visual_studio_add_flags(self): """visual studio flags found in the system environment""" v = self.env if self.environ.get('INCLUDE'): v.prepend_value('INCLUDES', [x for x in self.environ['INCLUDE'].split(';') if x]) # notice the 'S' if self.environ.get('LIB'): v.prepend_value('LIBPATH', [x for x in self.environ['LIB'].split(';') if x]) @conf def msvc_common_flags(conf): """ Setup the flags required for executing the msvc compiler """ v = conf.env v.DEST_BINFMT = 'pe' v.append_value('CFLAGS', ['/nologo']) v.append_value('CXXFLAGS', ['/nologo']) v.append_value('LINKFLAGS', ['/nologo']) v.DEFINES_ST = '/D%s' v.CC_SRC_F = '' v.CC_TGT_F = ['/c', '/Fo'] v.CXX_SRC_F = '' v.CXX_TGT_F = ['/c', '/Fo'] if (v.MSVC_COMPILER == 'msvc' and v.MSVC_VERSION >= 8) or (v.MSVC_COMPILER == 'wsdk' and v.MSVC_VERSION >= 6): v.CC_TGT_F = ['/FC'] + v.CC_TGT_F v.CXX_TGT_F = ['/FC'] + v.CXX_TGT_F v.CPPPATH_ST = '/I%s' # template for adding include paths v.AR_TGT_F = v.CCLNK_TGT_F = v.CXXLNK_TGT_F = '/OUT:' # CRT specific flags v.CFLAGS_CRT_MULTITHREADED = v.CXXFLAGS_CRT_MULTITHREADED = ['/MT'] v.CFLAGS_CRT_MULTITHREADED_DLL = v.CXXFLAGS_CRT_MULTITHREADED_DLL = ['/MD'] v.CFLAGS_CRT_MULTITHREADED_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DBG = ['/MTd'] v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = ['/MDd'] v.LIB_ST = '%s.lib' v.LIBPATH_ST = '/LIBPATH:%s' v.STLIB_ST = '%s.lib' v.STLIBPATH_ST = '/LIBPATH:%s' if v.MSVC_MANIFEST: v.append_value('LINKFLAGS', ['/MANIFEST']) v.CFLAGS_cshlib = [] v.CXXFLAGS_cxxshlib = [] v.LINKFLAGS_cshlib = v.LINKFLAGS_cxxshlib = ['/DLL'] v.cshlib_PATTERN = v.cxxshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.lib' v.IMPLIB_ST = '/IMPLIB:%s' v.LINKFLAGS_cstlib = [] v.cstlib_PATTERN = v.cxxstlib_PATTERN = '%s.lib' v.cprogram_PATTERN = v.cxxprogram_PATTERN = '%s.exe' v.def_PATTERN = '/def:%s' ####################################################################################################### ##### conf above, build below @after_method('apply_link') @feature('c', 'cxx') def apply_flags_msvc(self): """ Add additional flags implied by msvc, such as subsystems and pdb files:: def build(bld): bld.stlib(source='main.c', target='bar', subsystem='gruik') """ if self.env.CC_NAME != 'msvc' or not getattr(self, 'link_task', None): return is_static = isinstance(self.link_task, ccroot.stlink_task) subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = is_static and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if not is_static: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] in ('debug', 'debug:full', 'debug:fastlink'): pdbnode = self.link_task.outputs[0].change_ext('.pdb') self.link_task.outputs.append(pdbnode) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=pdbnode) break @feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib') @after_method('apply_link') def apply_manifest(self): """ Special linker for MSVC with support for embedding manifests into DLL's and executables compiled by Visual Studio 2005 or probably later. Without the manifest file, the binaries are unusable. 
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx """ if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST and getattr(self, 'link_task', None): out_node = self.link_task.outputs[0] man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.env.DO_MANIFEST = True def make_winapp(self, family): append = self.env.append_unique append('DEFINES', 'WINAPI_FAMILY=%s' % family) append('CXXFLAGS', ['/ZW', '/TP']) for lib_path in self.env.LIBPATH: append('CXXFLAGS','/AI%s'%lib_path) @feature('winphoneapp') @after_method('process_use') @after_method('propagate_uselib_vars') def make_winphone_app(self): """ Insert configuration flags for windows phone applications (adds /ZW, /TP...) """ make_winapp(self, 'WINAPI_FAMILY_PHONE_APP') self.env.append_unique('LINKFLAGS', ['/NODEFAULTLIB:ole32.lib', 'PhoneAppModelHost.lib']) @feature('winapp') @after_method('process_use') @after_method('propagate_uselib_vars') def make_windows_app(self): """ Insert configuration flags for windows applications (adds /ZW, /TP...) """ make_winapp(self, 'WINAPI_FAMILY_DESKTOP_APP') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/Tools/nasm.py0000660000000000000000000000133500000000000022017 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) """ Nasm tool (asm processing) """ import os import waflib.Tools.asm # leave this from waflib.TaskGen import feature @feature('asm') def apply_nasm_vars(self): """provided for compatibility""" self.env.append_value('ASFLAGS', self.to_list(getattr(self, 'nasm_flags', []))) def configure(conf): """ Detect nasm/yasm and set the variable *AS* """ conf.find_program(['nasm', 'yasm'], var='AS') conf.env.AS_TGT_F = ['-o'] conf.env.ASLNK_TGT_F = ['-o'] conf.load('asm') conf.env.ASMPATH_ST = '-I%s' + os.sep txt = conf.cmd_and_log(conf.env.AS + ['--version']) if 'yasm' in txt.lower(): conf.env.ASM_NAME = 'yasm' else: conf.env.ASM_NAME = 'nasm' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/nobuild.py0000660000000000000000000000064300000000000022516 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 (ita) """ Override the build commands to write empty files. This is useful for profiling and evaluating the Python overhead. To use:: def build(bld): ... bld.load('nobuild') """ from waflib import Task def build(bld): def run(self): for x in self.outputs: x.write('') for (name, cls) in Task.classes.items(): cls.run = run ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/perl.py0000660000000000000000000001064500000000000022027 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # andersg at 0x63.nu 2007 # Thomas Nagy 2016-2018 (ita) """ Support for Perl extensions. 
A C/C++ compiler is required:: def options(opt): opt.load('compiler_c perl') def configure(conf): conf.load('compiler_c perl') conf.check_perl_version((5,6,0)) conf.check_perl_ext_devel() conf.check_perl_module('Cairo') conf.check_perl_module('Devel::PPPort 4.89') def build(bld): bld( features = 'c cshlib perlext', source = 'Mytest.xs', target = 'Mytest', install_path = '${ARCHDIR_PERL}/auto') bld.install_files('${ARCHDIR_PERL}', 'Mytest.pm') """ import os from waflib import Task, Options, Utils, Errors from waflib.Configure import conf from waflib.TaskGen import extension, feature, before_method @before_method('apply_incpaths', 'apply_link', 'propagate_uselib_vars') @feature('perlext') def init_perlext(self): """ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the *lib* prefix from library names. """ self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT') self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.perlext_PATTERN @extension('.xs') def xsubpp_file(self, node): """ Create :py:class:`waflib.Tools.perl.xsubpp` tasks to process *.xs* files """ outnode = node.change_ext('.c') self.create_task('xsubpp', node, outnode) self.source.append(outnode) class xsubpp(Task.Task): """ Process *.xs* files """ run_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}' color = 'BLUE' ext_out = ['.h'] @conf def check_perl_version(self, minver=None): """ Check if Perl is installed, and set the variable PERL. minver is supposed to be a tuple """ res = True if minver: cver = '.'.join(map(str,minver)) else: cver = '' self.start_msg('Checking for minimum perl version %s' % cver) perl = self.find_program('perl', var='PERL', value=getattr(Options.options, 'perlbinary', None)) version = self.cmd_and_log(perl + ["-e", 'printf \"%vd\", $^V']) if not version: res = False version = "Unknown" elif not minver is None: ver = tuple(map(int, version.split("."))) if ver < minver: res = False self.end_msg(version, color=res and 'GREEN' or 'YELLOW') return res @conf def check_perl_module(self, module): """ Check if specified perlmodule is installed. The minimum version can be specified by specifying it after modulename like this:: def configure(conf): conf.check_perl_module("Some::Module 2.92") """ cmd = self.env.PERL + ['-e', 'use %s' % module] self.start_msg('perl module %s' % module) try: r = self.cmd_and_log(cmd) except Errors.WafError: self.end_msg(False) return None self.end_msg(r or True) return r @conf def check_perl_ext_devel(self): """ Check for configuration needed to build perl extensions. Sets different xxx_PERLEXT variables in the environment. Also sets the ARCHDIR_PERL variable useful as installation path, which can be overridden by ``--with-perl-archdir`` option. 
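For example (illustrative path)::

	$ waf configure --with-perl-archdir=/opt/myapp/perl/arch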
""" env = self.env perl = env.PERL if not perl: self.fatal('find perl first') def cmd_perl_config(s): return perl + ['-MConfig', '-e', 'print \"%s\"' % s] def cfg_str(cfg): return self.cmd_and_log(cmd_perl_config(cfg)) def cfg_lst(cfg): return Utils.to_list(cfg_str(cfg)) def find_xsubpp(): for var in ('privlib', 'vendorlib'): xsubpp = cfg_lst('$Config{%s}/ExtUtils/xsubpp$Config{exe_ext}' % var) if xsubpp and os.path.isfile(xsubpp[0]): return xsubpp return self.find_program('xsubpp') env.LINKFLAGS_PERLEXT = cfg_lst('$Config{lddlflags}') env.INCLUDES_PERLEXT = cfg_lst('$Config{archlib}/CORE') env.CFLAGS_PERLEXT = cfg_lst('$Config{ccflags} $Config{cccdlflags}') env.EXTUTILS_TYPEMAP = cfg_lst('$Config{privlib}/ExtUtils/typemap') env.XSUBPP = find_xsubpp() if not getattr(Options.options, 'perlarchdir', None): env.ARCHDIR_PERL = cfg_str('$Config{sitearch}') else: env.ARCHDIR_PERL = getattr(Options.options, 'perlarchdir') env.perlext_PATTERN = '%s.' + cfg_str('$Config{dlext}') def options(opt): """ Add the ``--with-perl-archdir`` and ``--with-perl-binary`` command-line options. """ opt.add_option('--with-perl-binary', type='string', dest='perlbinary', help = 'Specify alternate perl binary', default=None) opt.add_option('--with-perl-archdir', type='string', dest='perlarchdir', help = 'Specify directory where to install arch specific files', default=None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Tools/python.py0000660000000000000000000005400000000000000022377 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2007-2015 (ita) # Gustavo Carneiro (gjc), 2007 """ Support for Python, detect the headers and libraries and provide *use* variables to link C/C++ programs against them:: def options(opt): opt.load('compiler_c python') def configure(conf): conf.load('compiler_c python') conf.check_python_version((2,4,2)) conf.check_python_headers() def build(bld): bld.program(features='pyembed', source='a.c', target='myprog') bld.shlib(features='pyext', source='b.c', target='mylib') """ import os, sys from waflib import Errors, Logs, Node, Options, Task, Utils from waflib.TaskGen import extension, before_method, after_method, feature from waflib.Configure import conf FRAG = ''' #include #ifdef __cplusplus extern "C" { #endif void Py_Initialize(void); void Py_Finalize(void); #ifdef __cplusplus } #endif int main(int argc, char **argv) { (void)argc; (void)argv; Py_Initialize(); Py_Finalize(); return 0; } ''' """ Piece of C/C++ code used in :py:func:`waflib.Tools.python.check_python_headers` """ INST = ''' import sys, py_compile py_compile.compile(sys.argv[1], sys.argv[2], sys.argv[3], True) ''' """ Piece of Python code used in :py:class:`waflib.Tools.python.pyo` and :py:class:`waflib.Tools.python.pyc` for byte-compiling python files """ DISTUTILS_IMP = ['from distutils.sysconfig import get_config_var, get_python_lib'] @before_method('process_source') @feature('py') def feature_py(self): """ Create tasks to byte-compile .py files and install them, if requested """ self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') install_from = getattr(self, 'install_from', None) if install_from and not isinstance(install_from, Node.Node): install_from = self.path.find_dir(install_from) self.install_from = install_from ver = self.env.PYTHON_VERSION if not ver: self.bld.fatal('Installing python files requires PYTHON_VERSION, try conf.check_python_version') if 
int(ver.replace('.', '')) > 31: self.install_32 = True @extension('.py') def process_py(self, node): """ Add signature of .py file, so it will be byte-compiled when necessary """ assert(hasattr(self, 'install_path')), 'add features="py" for target "%s" in "%s/wscript".' % (self.target, self.path.nice_path()) self.install_from = getattr(self, 'install_from', None) relative_trick = getattr(self, 'relative_trick', True) if self.install_from: assert isinstance(self.install_from, Node.Node), \ 'add features="py" for target "%s" in "%s/wscript" (%s).' % (self.target, self.path.nice_path(), type(self.install_from)) # where to install the python file if self.install_path: if self.install_from: self.add_install_files(install_to=self.install_path, install_from=node, cwd=self.install_from, relative_trick=relative_trick) else: self.add_install_files(install_to=self.install_path, install_from=node, relative_trick=relative_trick) lst = [] if self.env.PYC: lst.append('pyc') if self.env.PYO: lst.append('pyo') if self.install_path: if self.install_from: target_dir = node.path_from(self.install_from) if relative_trick else node.name pyd = Utils.subst_vars("%s/%s" % (self.install_path, target_dir), self.env) else: target_dir = node.path_from(self.path) if relative_trick else node.name pyd = Utils.subst_vars("%s/%s" % (self.install_path, target_dir), self.env) else: pyd = node.abspath() for ext in lst: if self.env.PYTAG and not self.env.NOPYCACHE: # __pycache__ installation for python 3.2 - PEP 3147 name = node.name[:-3] pyobj = node.parent.get_bld().make_node('__pycache__').make_node("%s.%s.%s" % (name, self.env.PYTAG, ext)) pyobj.parent.mkdir() else: pyobj = node.change_ext(".%s" % ext) tsk = self.create_task(ext, node, pyobj) tsk.pyd = pyd if self.install_path: self.add_install_files(install_to=os.path.dirname(pyd), install_from=pyobj, cwd=node.parent.get_bld(), relative_trick=relative_trick) class pyc(Task.Task): """ Byte-compiling python files """ color = 'PINK' def __str__(self): node = self.outputs[0] return node.path_from(node.ctx.launch_node()) def run(self): cmd = [Utils.subst_vars('${PYTHON}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(), self.pyd] ret = self.generator.bld.exec_command(cmd) return ret class pyo(Task.Task): """ Byte-compiling python files """ color = 'PINK' def __str__(self): node = self.outputs[0] return node.path_from(node.ctx.launch_node()) def run(self): cmd = [Utils.subst_vars('${PYTHON}', self.env), Utils.subst_vars('${PYFLAGS_OPT}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(), self.pyd] ret = self.generator.bld.exec_command(cmd) return ret @feature('pyext') @before_method('propagate_uselib_vars', 'apply_link') @after_method('apply_bundle') def init_pyext(self): """ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the *lib* prefix from library names. 
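    As a result, a task generator such as the following (names are illustrative)
    produces ``spam.so`` (or whatever ``pyext_PATTERN`` expands to on the
    platform) instead of ``libspam.so``::

        def build(bld):
            bld.shlib(features='pyext', source='spam.c', target='spam')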
""" self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PYEXT' in self.uselib: self.uselib.append('PYEXT') # override shlib_PATTERN set by the osx module self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN = self.env.pyext_PATTERN self.env.fcshlib_PATTERN = self.env.dshlib_PATTERN = self.env.pyext_PATTERN try: if not self.install_path: return except AttributeError: self.install_path = '${PYTHONARCHDIR}' @feature('pyext') @before_method('apply_link', 'apply_bundle') def set_bundle(self): """Mac-specific pyext extension that enables bundles from c_osx.py""" if Utils.unversioned_sys_platform() == 'darwin': self.mac_bundle = True @before_method('propagate_uselib_vars') @feature('pyembed') def init_pyembed(self): """ Add the PYEMBED variable. """ self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PYEMBED' in self.uselib: self.uselib.append('PYEMBED') @conf def get_python_variables(self, variables, imports=None): """ Spawn a new python process to dump configuration variables :param variables: variables to print :type variables: list of string :param imports: one import by element :type imports: list of string :return: the variable values :rtype: list of string """ if not imports: try: imports = self.python_imports except AttributeError: imports = DISTUTILS_IMP program = list(imports) # copy program.append('') for v in variables: program.append("print(repr(%s))" % v) os_env = dict(os.environ) try: del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool except KeyError: pass try: out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env) except Errors.WafError: self.fatal('The distutils module is unusable: install "python-devel"?') self.to_log(out) return_values = [] for s in out.splitlines(): s = s.strip() if not s: continue if s == 'None': return_values.append(None) elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'): return_values.append(eval(s)) elif s[0].isdigit(): return_values.append(int(s)) else: break return return_values @conf def test_pyembed(self, mode, msg='Testing pyembed configuration'): self.check(header_name='Python.h', define_name='HAVE_PYEMBED', msg=msg, fragment=FRAG, errmsg='Could not build a python embedded interpreter', features='%s %sprogram pyembed' % (mode, mode)) @conf def test_pyext(self, mode, msg='Testing pyext configuration'): self.check(header_name='Python.h', define_name='HAVE_PYEXT', msg=msg, fragment=FRAG, errmsg='Could not build python extensions', features='%s %sshlib pyext' % (mode, mode)) @conf def python_cross_compile(self, features='pyembed pyext'): """ For cross-compilation purposes, it is possible to bypass the normal detection and set the flags that you want: PYTHON_VERSION='3.4' PYTAG='cpython34' pyext_PATTERN="%s.so" PYTHON_LDFLAGS='-lpthread -ldl' waf configure The following variables are used: PYTHON_VERSION required PYTAG required PYTHON_LDFLAGS required pyext_PATTERN required PYTHON_PYEXT_LDFLAGS PYTHON_PYEMBED_LDFLAGS """ features = Utils.to_list(features) if not ('PYTHON_LDFLAGS' in self.environ or 'PYTHON_PYEXT_LDFLAGS' in self.environ or 'PYTHON_PYEMBED_LDFLAGS' in self.environ): return False for x in 'PYTHON_VERSION PYTAG pyext_PATTERN'.split(): if not x in self.environ: self.fatal('Please set %s in the os environment' % x) else: self.env[x] = self.environ[x] xx = self.env.CXX_NAME and 'cxx' or 'c' if 'pyext' in features: flags = self.environ.get('PYTHON_PYEXT_LDFLAGS', self.environ.get('PYTHON_LDFLAGS')) if flags is None: self.fatal('No 
flags provided through PYTHON_PYEXT_LDFLAGS as required') else: self.parse_flags(flags, 'PYEXT') self.test_pyext(xx) if 'pyembed' in features: flags = self.environ.get('PYTHON_PYEMBED_LDFLAGS', self.environ.get('PYTHON_LDFLAGS')) if flags is None: self.fatal('No flags provided through PYTHON_PYEMBED_LDFLAGS as required') else: self.parse_flags(flags, 'PYEMBED') self.test_pyembed(xx) return True @conf def check_python_headers(conf, features='pyembed pyext'): """ Check for headers and libraries necessary to extend or embed python by using the module *distutils*. On success the environment variables xxx_PYEXT and xxx_PYEMBED are added: * PYEXT: for compiling python extensions * PYEMBED: for embedding a python interpreter """ features = Utils.to_list(features) assert ('pyembed' in features) or ('pyext' in features), "check_python_headers features must include 'pyembed' and/or 'pyext'" env = conf.env if not env.CC_NAME and not env.CXX_NAME: conf.fatal('load a compiler first (gcc, g++, ..)') # bypass all the code below for cross-compilation if conf.python_cross_compile(features): return if not env.PYTHON_VERSION: conf.check_python_version() pybin = env.PYTHON if not pybin: conf.fatal('Could not find the python executable') # so we actually do all this for compatibility reasons and for obtaining pyext_PATTERN below v = 'prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS LDVERSION'.split() try: lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v]) except RuntimeError: conf.fatal("Python development headers not found (-v for details).") vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)] conf.to_log("Configuration returned from %r:\n%s\n" % (pybin, '\n'.join(vals))) dct = dict(zip(v, lst)) x = 'MACOSX_DEPLOYMENT_TARGET' if dct[x]: env[x] = conf.environ[x] = dct[x] env.pyext_PATTERN = '%s' + dct['SO'] # not a mistake # Try to get pythonX.Y-config num = '.'.join(env.PYTHON_VERSION.split('.')[:2]) conf.find_program([''.join(pybin) + '-config', 'python%s-config' % num, 'python-config-%s' % num, 'python%sm-config' % num], var='PYTHON_CONFIG', msg="python-config", mandatory=False) if env.PYTHON_CONFIG: # check python-config output only once if conf.env.HAVE_PYTHON_H: return # python2.6-config requires 3 runs all_flags = [['--cflags', '--libs', '--ldflags']] if sys.hexversion < 0x2070000: all_flags = [[k] for k in all_flags[0]] xx = env.CXX_NAME and 'cxx' or 'c' if 'pyembed' in features: for flags in all_flags: # Python 3.8 has different flags for pyembed, needs --embed embedflags = flags + ['--embed'] try: conf.check_cfg(msg='Asking python-config for pyembed %r flags' % ' '.join(embedflags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=embedflags) except conf.errors.ConfigurationError: # However Python < 3.8 doesn't accept --embed, so we need a fallback conf.check_cfg(msg='Asking python-config for pyembed %r flags' % ' '.join(flags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=flags) try: conf.test_pyembed(xx) except conf.errors.ConfigurationError: # python bug 7352 if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']: env.append_unique('LIBPATH_PYEMBED', [dct['LIBDIR']]) conf.test_pyembed(xx) else: raise if 'pyext' in features: for flags in all_flags: conf.check_cfg(msg='Asking python-config for pyext %r flags' % ' '.join(flags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEXT', args=flags) try: conf.test_pyext(xx) except conf.errors.ConfigurationError: # python bug 7352 if 
dct['Py_ENABLE_SHARED'] and dct['LIBDIR']: env.append_unique('LIBPATH_PYEXT', [dct['LIBDIR']]) conf.test_pyext(xx) else: raise conf.define('HAVE_PYTHON_H', 1) return # No python-config, do something else on windows systems all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS'] conf.parse_flags(all_flags, 'PYEMBED') all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS'] conf.parse_flags(all_flags, 'PYEXT') result = None if not dct["LDVERSION"]: dct["LDVERSION"] = env.PYTHON_VERSION # further simplification will be complicated for name in ('python' + dct['LDVERSION'], 'python' + env.PYTHON_VERSION + 'm', 'python' + env.PYTHON_VERSION.replace('.', '')): # LIBPATH_PYEMBED is already set; see if it works. if not result and env.LIBPATH_PYEMBED: path = env.LIBPATH_PYEMBED conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path) result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBPATH_PYEMBED' % name) if not result and dct['LIBDIR']: path = [dct['LIBDIR']] conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path) result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBDIR' % name) if not result and dct['LIBPL']: path = [dct['LIBPL']] conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n") result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in python_LIBPL' % name) if not result: path = [os.path.join(dct['prefix'], "libs")] conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n") result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $prefix/libs' % name) if result: break # do not forget to set LIBPATH_PYEMBED if result: env.LIBPATH_PYEMBED = path env.append_value('LIB_PYEMBED', [name]) else: conf.to_log("\n\n### LIB NOT FOUND\n") # under certain conditions, python extensions must link to # python libraries, not just python embedding programs. if Utils.is_win32 or dct['Py_ENABLE_SHARED']: env.LIBPATH_PYEXT = env.LIBPATH_PYEMBED env.LIB_PYEXT = env.LIB_PYEMBED conf.to_log("Include path for Python extensions (found via distutils module): %r\n" % (dct['INCLUDEPY'],)) env.INCLUDES_PYEXT = [dct['INCLUDEPY']] env.INCLUDES_PYEMBED = [dct['INCLUDEPY']] # Code using the Python API needs to be compiled with -fno-strict-aliasing if env.CC_NAME == 'gcc': env.append_unique('CFLAGS_PYEMBED', ['-fno-strict-aliasing']) env.append_unique('CFLAGS_PYEXT', ['-fno-strict-aliasing']) if env.CXX_NAME == 'gcc': env.append_unique('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing']) env.append_unique('CXXFLAGS_PYEXT', ['-fno-strict-aliasing']) if env.CC_NAME == "msvc": from distutils.msvccompiler import MSVCCompiler dist_compiler = MSVCCompiler() dist_compiler.initialize() env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options) env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options) env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared) # See if it compiles conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', uselib='PYEMBED', fragment=FRAG, errmsg='Distutils not installed? Broken python installation? Get python-config now!') @conf def check_python_version(conf, minver=None): """ Check if the python interpreter is found matching a given minimum version. minver should be a tuple, eg. 
to check for python >= 2.4.2 pass (2,4,2) as minver. If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR' (eg. '2.4') of the actual python version found, and PYTHONDIR and PYTHONARCHDIR are defined, pointing to the site-packages directories appropriate for this python version, where modules/packages/extensions should be installed. :param minver: minimum version :type minver: tuple of int """ assert minver is None or isinstance(minver, tuple) pybin = conf.env.PYTHON if not pybin: conf.fatal('could not find the python executable') # Get python version string cmd = pybin + ['-c', 'import sys\nfor x in sys.version_info: print(str(x))'] Logs.debug('python: Running python command %r', cmd) lines = conf.cmd_and_log(cmd).split() assert len(lines) == 5, "found %r lines, expected 5: %r" % (len(lines), lines) pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4])) # Compare python version with the minimum required result = (minver is None) or (pyver_tuple >= minver) if result: # define useful environment variables pyver = '.'.join([str(x) for x in pyver_tuple[:2]]) conf.env.PYTHON_VERSION = pyver if 'PYTHONDIR' in conf.env: # Check if --pythondir was specified pydir = conf.env.PYTHONDIR elif 'PYTHONDIR' in conf.environ: # Check environment for PYTHONDIR pydir = conf.environ['PYTHONDIR'] else: # Finally, try to guess if Utils.is_win32: (python_LIBDEST, pydir) = conf.get_python_variables( ["get_config_var('LIBDEST') or ''", "get_python_lib(standard_lib=0) or ''"]) else: python_LIBDEST = None (pydir,) = conf.get_python_variables( ["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX]) if python_LIBDEST is None: if conf.env.LIBDIR: python_LIBDEST = os.path.join(conf.env.LIBDIR, 'python' + pyver) else: python_LIBDEST = os.path.join(conf.env.PREFIX, 'lib', 'python' + pyver) if 'PYTHONARCHDIR' in conf.env: # Check if --pythonarchdir was specified pyarchdir = conf.env.PYTHONARCHDIR elif 'PYTHONARCHDIR' in conf.environ: # Check environment for PYTHONDIR pyarchdir = conf.environ['PYTHONARCHDIR'] else: # Finally, try to guess (pyarchdir, ) = conf.get_python_variables( ["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX]) if not pyarchdir: pyarchdir = pydir if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist conf.define('PYTHONDIR', pydir) conf.define('PYTHONARCHDIR', pyarchdir) conf.env.PYTHONDIR = pydir conf.env.PYTHONARCHDIR = pyarchdir # Feedback pyver_full = '.'.join(map(str, pyver_tuple[:3])) if minver is None: conf.msg('Checking for python version', pyver_full) else: minver_str = '.'.join(map(str, minver)) conf.msg('Checking for python version >= %s' % (minver_str,), pyver_full, color=result and 'GREEN' or 'YELLOW') if not result: conf.fatal('The python version is too old, expecting %r' % (minver,)) PYTHON_MODULE_TEMPLATE = ''' import %s as current_module version = getattr(current_module, '__version__', None) if version is not None: print(str(version)) else: print('unknown version') ''' @conf def check_python_module(conf, module_name, condition=''): """ Check if the selected python interpreter can import the given python module:: def configure(conf): conf.check_python_module('pygccxml') conf.check_python_module('re', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)") :param module_name: module :type module_name: string """ msg = "Checking for python module %r" % module_name if condition: msg = '%s (%s)' % (msg, condition) conf.start_msg(msg) try: ret = conf.cmd_and_log(conf.env.PYTHON + 
['-c', PYTHON_MODULE_TEMPLATE % module_name]) except Errors.WafError: conf.end_msg(False) conf.fatal('Could not find the python module %r' % module_name) ret = ret.strip() if condition: conf.end_msg(ret) if ret == 'unknown version': conf.fatal('Could not check the %s version' % module_name) from distutils.version import LooseVersion def num(*k): if isinstance(k[0], int): return LooseVersion('.'.join([str(x) for x in k])) else: return LooseVersion(k[0]) d = {'num': num, 'ver': LooseVersion(ret)} ev = eval(condition, {}, d) if not ev: conf.fatal('The %s version does not satisfy the requirements' % module_name) else: if ret == 'unknown version': conf.end_msg(True) else: conf.end_msg(ret) def configure(conf): """ Detect the python interpreter """ v = conf.env if getattr(Options.options, 'pythondir', None): v.PYTHONDIR = Options.options.pythondir if getattr(Options.options, 'pythonarchdir', None): v.PYTHONARCHDIR = Options.options.pythonarchdir if getattr(Options.options, 'nopycache', None): v.NOPYCACHE=Options.options.nopycache if not v.PYTHON: v.PYTHON = [getattr(Options.options, 'python', None) or sys.executable] v.PYTHON = Utils.to_list(v.PYTHON) conf.find_program('python', var='PYTHON') v.PYFLAGS = '' v.PYFLAGS_OPT = '-O' v.PYC = getattr(Options.options, 'pyc', 1) v.PYO = getattr(Options.options, 'pyo', 1) try: v.PYTAG = conf.cmd_and_log(conf.env.PYTHON + ['-c', "import sys\ntry:\n print(sys.implementation.cache_tag)\nexcept AttributeError:\n import imp\n print(imp.get_tag())\n"]).strip() except Errors.WafError: pass def options(opt): """ Add python-specific options """ pyopt=opt.add_option_group("Python Options") pyopt.add_option('--nopyc', dest = 'pyc', action='store_false', default=1, help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]') pyopt.add_option('--nopyo', dest='pyo', action='store_false', default=1, help='Do not install optimised compiled .pyo files (configuration) [Default:install]') pyopt.add_option('--nopycache',dest='nopycache', action='store_true', help='Do not use __pycache__ directory to install objects [Default:auto]') pyopt.add_option('--python', dest="python", help='python binary to be used [Default: %s]' % sys.executable) pyopt.add_option('--pythondir', dest='pythondir', help='Installation path for python modules (py, platform-independent .py and .pyc files)') pyopt.add_option('--pythonarchdir', dest='pythonarchdir', help='Installation path for python extension (pyext, platform-dependent .so or .dylib files)') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/Tools/qt5.py0000660000000000000000000005770400000000000021605 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ This tool helps with finding Qt5 tools and libraries, and also provides syntactic sugar for using Qt5 tools. The following snippet illustrates the tool usage:: def options(opt): opt.load('compiler_cxx qt5') def configure(conf): conf.load('compiler_cxx qt5') def build(bld): bld( features = 'qt5 cxx cxxprogram', uselib = 'QT5CORE QT5GUI QT5OPENGL QT5SVG', source = 'main.cpp textures.qrc aboutDialog.ui', target = 'window', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "qt5" tool. You also need to edit your sources accordingly: - the normal way of doing things is to have your C++ files include the .moc file. 
This is regarded as the best practice (and provides much faster compilations). It also implies that the include paths have beenset properly. - to have the include paths added automatically, use the following:: from waflib.TaskGen import feature, before_method, after_method @feature('cxx') @after_method('process_source') @before_method('apply_incpaths') def add_includes_paths(self): incs = set(self.to_list(getattr(self, 'includes', ''))) for x in self.compiled_tasks: incs.add(x.inputs[0].parent.path_from(self.path)) self.includes = sorted(incs) Note: another tool provides Qt processing that does not require .moc includes, see 'playground/slow_qt/'. A few options (--qt{dir,bin,...}) and environment variables (QT5_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, tool path selection, etc; please read the source for more info. The detection uses pkg-config on Linux by default. The list of libraries to be requested to pkg-config is formulated by scanning in the QTLIBS directory (that can be passed via --qtlibs or by setting the environment variable QT5_LIBDIR otherwise is derived by querying qmake for QT_INSTALL_LIBS directory) for shared/static libraries present. Alternatively the list of libraries to be requested via pkg-config can be set using the qt5_vars attribute, ie: conf.qt5_vars = ['Qt5Core', 'Qt5Gui', 'Qt5Widgets', 'Qt5Test']; This can speed up configuration phase if needed libraries are known beforehand, can improve detection on systems with a sparse QT5 libraries installation (ie. NIX) and can improve detection of some header-only Qt modules (ie. Qt5UiPlugin). To force static library detection use: QT5_XCOMPILE=1 QT5_FORCE_STATIC=1 waf configure """ from __future__ import with_statement try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os, sys, re from waflib.Tools import cxx from waflib import Build, Task, Utils, Options, Errors, Context from waflib.TaskGen import feature, after_method, extension, before_method from waflib.Configure import conf from waflib import Logs MOC_H = ['.h', '.hpp', '.hxx', '.hh'] """ File extensions associated to .moc files """ EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ EXT_QT5 = ['.cpp', '.cc', '.cxx', '.C'] """ File extensions of C++ files that may require a .moc processing """ class qxx(Task.classes['cxx']): """ Each C++ file can have zero or several .moc files to create. They are known only when the files are scanned (preprocessor) To avoid scanning the c++ files each time (parsing C/C++), the results are retrieved from the task cache (bld.node_deps/bld.raw_deps). The moc tasks are also created *dynamically* during the build. """ def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.moc_done = 0 def runnable_status(self): """ Compute the task signature to make sure the scanner was executed. Create the moc tasks by using :py:meth:`waflib.Tools.qt5.qxx.add_moc_tasks` (if necessary), then postpone the task execution (there is no need to recompute the task signature). 
""" if self.moc_done: return Task.Task.runnable_status(self) else: for t in self.run_after: if not t.hasrun: return Task.ASK_LATER self.add_moc_tasks() return Task.Task.runnable_status(self) def create_moc_task(self, h_node, m_node): """ If several libraries use the same classes, it is possible that moc will run several times (Issue 1318) It is not possible to change the file names, but we can assume that the moc transformation will be identical, and the moc tasks can be shared in a global cache. """ try: moc_cache = self.generator.bld.moc_cache except AttributeError: moc_cache = self.generator.bld.moc_cache = {} try: return moc_cache[h_node] except KeyError: tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(h_node) tsk.set_outputs(m_node) tsk.env.append_unique('MOC_FLAGS', '-i') if self.generator: self.generator.tasks.append(tsk) # direct injection in the build phase (safe because called from the main thread) gen = self.generator.bld.producer gen.outstanding.append(tsk) gen.total += 1 return tsk else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') def add_moc_tasks(self): """ Creates moc tasks by looking in the list of file dependencies ``bld.raw_deps[self.uid()]`` """ node = self.inputs[0] bld = self.generator.bld # skip on uninstall due to generated files if bld.is_install == Build.UNINSTALL: return try: # compute the signature once to know if there is a moc file to create self.signature() except KeyError: # the moc file may be referenced somewhere else pass else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') include_nodes = [node.parent] + self.generator.includes_nodes moctasks = [] mocfiles = set() for d in bld.raw_deps.get(self.uid(), []): if not d.endswith('.moc'): continue # process that base.moc only once if d in mocfiles: continue mocfiles.add(d) # find the source associated with the moc file h_node = None base2 = d[:-4] # foo.moc from foo.cpp prefix = node.name[:node.name.rfind('.')] if base2 == prefix: h_node = node else: # this deviates from the standard # if bar.cpp includes foo.moc, then assume it is from foo.h for x in include_nodes: for e in MOC_H: h_node = x.find_node(base2 + e) if h_node: break else: continue break if h_node: m_node = h_node.change_ext('.moc') else: raise Errors.WafError('No source found for %r which is a moc file' % d) # create the moc task task = self.create_moc_task(h_node, m_node) moctasks.append(task) # simple scheduler dependency: run the moc task before others self.run_after.update(set(moctasks)) self.moc_done = 1 class trans_update(Task.Task): """Updates a .ts files from a list of C++ files""" run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' color = 'BLUE' class XMLHandler(ContentHandler): """ Parses ``.qrc`` files """ def __init__(self): ContentHandler.__init__(self) self.buf = [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_rcc_task(self, node): "Creates rcc and cxx tasks for ``.qrc`` files" rcnode = node.change_ext('_rc.%d.cpp' % self.idx) self.create_task('rcc', node, rcnode) cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) try: self.compiled_tasks.append(cpptask) except AttributeError: self.compiled_tasks = [cpptask] return cpptask @extension(*EXT_UI) def create_uic_task(self, node): 
"Create uic tasks for user interface ``.ui`` definition files" """ If UIC file is used in more than one bld, we would have a conflict in parallel execution It is not possible to change the file names (like .self.idx. as for objects) as they have to be referenced by the source file, but we can assume that the transformation will be identical and the tasks can be shared in a global cache. """ try: uic_cache = self.bld.uic_cache except AttributeError: uic_cache = self.bld.uic_cache = {} if node not in uic_cache: uictask = uic_cache[node] = self.create_task('ui5', node) uictask.outputs = [node.parent.find_or_declare(self.env.ui_PATTERN % node.name[:-3])] @extension('.ts') def add_lang(self, node): """Adds all the .ts file into ``self.lang``""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('qt5') @before_method('process_source') def process_mocs(self): """ Processes MOC files included in headers:: def build(bld): bld.program(features='qt5', source='main.cpp', target='app', use='QT5CORE', moc='foo.h') The build will run moc on foo.h to create moc_foo.n.cpp. The number in the file name is provided to avoid name clashes when the same headers are used by several targets. """ lst = self.to_nodes(getattr(self, 'moc', [])) self.source = self.to_list(getattr(self, 'source', [])) for x in lst: prefix = x.name[:x.name.rfind('.')] # foo.h -> foo moc_target = 'moc_%s.%d.cpp' % (prefix, self.idx) moc_node = x.parent.find_or_declare(moc_target) self.source.append(moc_node) self.create_task('moc', x, moc_node) @feature('qt5') @after_method('apply_link') def apply_qt5(self): """ Adds MOC_FLAGS which may be necessary for moc:: def build(bld): bld.program(features='qt5', source='main.cpp', target='app', use='QT5CORE') The additional parameters are: :param lang: list of translation files (\\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**) :type update: bool :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.%d.qm' % self.idx))) if getattr(self, 'update', None) and Options.options.trans_qt5: cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ a.inputs[0] for a in self.tasks if a.inputs and a.inputs[0].name.endswith('.ui')] for x in qmtasks: self.create_task('trans_update', cxxnodes, x.inputs) if getattr(self, 'langname', None): qmnodes = [x.outputs[0] for x in qmtasks] rcnode = self.langname if isinstance(rcnode, str): rcnode = self.path.find_or_declare(rcnode + ('.%d.qrc' % self.idx)) t = self.create_task('qm2rcc', qmnodes, rcnode) k = create_rcc_task(self, t.outputs[0]) self.link_task.inputs.append(k.outputs[0]) lst = [] for flag in self.to_list(self.env.CXXFLAGS): if len(flag) < 2: continue f = flag[0:2] if f in ('-D', '-I', '/D', '/I'): if (f[0] == '/'): lst.append('-' + flag[1:]) else: lst.append(flag) self.env.append_value('MOC_FLAGS', lst) @extension(*EXT_QT5) def cxx_hook(self, node): """ Re-maps C++ file extensions to the :py:class:`waflib.Tools.qt5.qxx` task. 
""" return self.create_compiled_task('qxx', node) class rcc(Task.Task): """ Processes ``.qrc`` files """ color = 'BLUE' run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' ext_out = ['.h'] def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def scan(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') return ([], []) parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) with open(self.inputs[0].abspath(), 'r') as f: parser.parse(f) nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: names.append(x) return (nodes, names) def quote_flag(self, x): """ Override Task.quote_flag. QT parses the argument files differently than cl.exe and link.exe :param x: flag :type x: string :return: quoted flag :rtype: string """ return x class moc(Task.Task): """ Creates ``.moc`` files """ color = 'BLUE' run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' def quote_flag(self, x): """ Override Task.quote_flag. QT parses the argument files differently than cl.exe and link.exe :param x: flag :type x: string :return: quoted flag :rtype: string """ return x class ui5(Task.Task): """ Processes ``.ui`` files """ color = 'BLUE' run_str = '${QT_UIC} ${SRC} -o ${TGT}' ext_out = ['.h'] class ts2qm(Task.Task): """ Generates ``.qm`` files from ``.ts`` files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): """ Generates ``.qrc`` files from ``.qm`` files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['%s' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '\n\n%s\n\n' % txt self.outputs[0].write(code) def configure(self): """ Besides the configuration options, the environment variable QT5_ROOT may be used to give the location of the qt5 libraries (absolute path). The detection uses the program ``pkg-config`` through :py:func:`waflib.Tools.config_c.check_cfg` """ if 'COMPILER_CXX' not in self.env: self.fatal('No CXX compiler defined: did you forget to configure compiler_cxx first?') self.find_qt5_binaries() self.set_qt5_libs_dir() self.set_qt5_libs_to_check() self.set_qt5_defines() self.find_qt5_libraries() self.add_qt5_rpath() self.simplify_qt5_libs() # warn about this during the configuration too if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') # Qt5 may be compiled with '-reduce-relocations' which requires dependent programs to have -fPIE or -fPIC? 
frag = '#include \nint main(int argc, char **argv) {QMap m;return m.keys().size();}\n' uses = 'QT5CORE' for flag in [[], '-fPIE', '-fPIC', '-std=c++11' , ['-std=c++11', '-fPIE'], ['-std=c++11', '-fPIC']]: msg = 'See if Qt files compile ' if flag: msg += 'with %s' % flag try: self.check(features='qt5 cxx', use=uses, uselib_store='qt5', cxxflags=flag, fragment=frag, msg=msg) except self.errors.ConfigurationError: pass else: break else: self.fatal('Could not build a simple Qt application') # FreeBSD does not add /usr/local/lib and the pkg-config files do not provide it either :-/ if Utils.unversioned_sys_platform() == 'freebsd': frag = '#include \nint main(int argc, char **argv) {QMap m;return m.keys().size();}\n' try: self.check(features='qt5 cxx cxxprogram', use=uses, fragment=frag, msg='Can we link Qt programs on FreeBSD directly?') except self.errors.ConfigurationError: self.check(features='qt5 cxx cxxprogram', use=uses, uselib_store='qt5', libpath='/usr/local/lib', fragment=frag, msg='Is /usr/local/lib required?') @conf def find_qt5_binaries(self): """ Detects Qt programs such as qmake, moc, uic, lrelease """ env = self.env opt = Options.options qtdir = getattr(opt, 'qtdir', '') qtbin = getattr(opt, 'qtbin', '') paths = [] if qtdir: qtbin = os.path.join(qtdir, 'bin') # the qt directory has been given from QT5_ROOT - deduce the qt binary path if not qtdir: qtdir = self.environ.get('QT5_ROOT', '') qtbin = self.environ.get('QT5_BIN') or os.path.join(qtdir, 'bin') if qtbin: paths = [qtbin] # no qtdir, look in the path and in /usr/local/Trolltech if not qtdir: paths = self.environ.get('PATH', '').split(os.pathsep) paths.extend(['/usr/share/qt5/bin', '/usr/local/lib/qt5/bin']) try: lst = Utils.listdir('/usr/local/Trolltech/') except OSError: pass else: if lst: lst.sort() lst.reverse() # keep the highest version qtdir = '/usr/local/Trolltech/%s/' % lst[0] qtbin = os.path.join(qtdir, 'bin') paths.append(qtbin) # at the end, try to find qmake in the paths given # keep the one with the highest version cand = None prev_ver = ['5', '0', '0'] for qmk in ('qmake-qt5', 'qmake5', 'qmake'): try: qmake = self.find_program(qmk, path_list=paths) except self.errors.ConfigurationError: pass else: try: version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip() except self.errors.WafError: pass else: if version: new_ver = version.split('.') if new_ver > prev_ver: cand = qmake prev_ver = new_ver # qmake could not be found easily, rely on qtchooser if not cand: try: self.find_program('qtchooser') except self.errors.ConfigurationError: pass else: cmd = self.env.QTCHOOSER + ['-qt=5', '-run-tool=qmake'] try: version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION']) except self.errors.WafError: pass else: cand = cmd if cand: self.env.QMAKE = cand else: self.fatal('Could not find qmake for qt5') self.env.QT_HOST_BINS = qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_HOST_BINS']).strip() paths.insert(0, qtbin) def find_bin(lst, var): if var in env: return for f in lst: try: ret = self.find_program(f, path_list=paths) except self.errors.ConfigurationError: pass else: env[var]=ret break find_bin(['uic-qt5', 'uic'], 'QT_UIC') if not env.QT_UIC: self.fatal('cannot find the uic compiler for qt5') self.start_msg('Checking for uic version') uicver = self.cmd_and_log(env.QT_UIC + ['-version'], output=Context.BOTH) uicver = ''.join(uicver).strip() uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') self.end_msg(uicver) if uicver.find(' 3.') != -1 or 
uicver.find(' 4.') != -1: self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path') find_bin(['moc-qt5', 'moc'], 'QT_MOC') find_bin(['rcc-qt5', 'rcc'], 'QT_RCC') find_bin(['lrelease-qt5', 'lrelease'], 'QT_LRELEASE') find_bin(['lupdate-qt5', 'lupdate'], 'QT_LUPDATE') env.UIC_ST = '%s -o %s' env.MOC_ST = '-o' env.ui_PATTERN = 'ui_%s.h' env.QT_LRELEASE_FLAGS = ['-silent'] env.MOCCPPPATH_ST = '-I%s' env.MOCDEFINES_ST = '-D%s' @conf def set_qt5_libs_dir(self): env = self.env qtlibs = getattr(Options.options, 'qtlibs', None) or self.environ.get('QT5_LIBDIR') if not qtlibs: try: qtlibs = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip() except Errors.WafError: qtdir = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() qtlibs = os.path.join(qtdir, 'lib') self.msg('Found the Qt5 library path', qtlibs) env.QTLIBS = qtlibs @conf def find_single_qt5_lib(self, name, uselib, qtlibs, qtincludes, force_static): env = self.env if force_static: exts = ('.a', '.lib') prefix = 'STLIB' else: exts = ('.so', '.lib') prefix = 'LIB' def lib_names(): for x in exts: for k in ('', '5') if Utils.is_win32 else ['']: for p in ('lib', ''): yield (p, name, k, x) for tup in lib_names(): k = ''.join(tup) path = os.path.join(qtlibs, k) if os.path.exists(path): if env.DEST_OS == 'win32': libval = ''.join(tup[:-1]) else: libval = name env.append_unique(prefix + '_' + uselib, libval) env.append_unique('%sPATH_%s' % (prefix, uselib), qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, name.replace('Qt5', 'Qt'))) return k return False @conf def find_qt5_libraries(self): env = self.env qtincludes = self.environ.get('QT5_INCLUDES') or self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_HEADERS']).strip() force_static = self.environ.get('QT5_FORCE_STATIC') try: if self.environ.get('QT5_XCOMPILE'): self.fatal('QT5_XCOMPILE Disables pkg-config detection') self.check_cfg(atleast_pkgconfig_version='0.1') except self.errors.ConfigurationError: for i in self.qt5_vars: uselib = i.upper() if Utils.unversioned_sys_platform() == 'darwin': # Since at least qt 4.7.3 each library locates in separate directory fwk = i.replace('Qt5', 'Qt') frameworkName = fwk + '.framework' qtDynamicLib = os.path.join(env.QTLIBS, frameworkName, fwk) if os.path.exists(qtDynamicLib): env.append_unique('FRAMEWORK_' + uselib, fwk) env.append_unique('FRAMEWORKPATH_' + uselib, env.QTLIBS) self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('INCLUDES_' + uselib, os.path.join(env.QTLIBS, frameworkName, 'Headers')) else: ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, force_static) if not force_static and not ret: ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, True) self.msg('Checking for %s' % i, ret, 'GREEN' if ret else 'YELLOW') else: path = '%s:%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib' % ( self.environ.get('PKG_CONFIG_PATH', ''), env.QTLIBS, env.QTLIBS) for i in self.qt5_vars: self.check_cfg(package=i, args='--cflags --libs', mandatory=False, force_static=force_static, pkg_config_path=path) @conf def simplify_qt5_libs(self): """ Since library paths make really long command-lines, and since everything depends on qtcore, remove the qtcore ones from qtgui, etc """ env = self.env def process_lib(vars_, coreval): for d in vars_: var = d.upper() if var == 'QTCORE': continue value 
= env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_'+var] = accu process_lib(self.qt5_vars, 'LIBPATH_QTCORE') @conf def add_qt5_rpath(self): """ Defines rpath entries for Qt libraries """ env = self.env if getattr(Options.options, 'want_rpath', False): def process_rpath(vars_, coreval): for d in vars_: var = d.upper() value = env['LIBPATH_' + var] if value: core = env[coreval] accu = [] for lib in value: if var != 'QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_' + var] = accu process_rpath(self.qt5_vars, 'LIBPATH_QTCORE') @conf def set_qt5_libs_to_check(self): self.qt5_vars = Utils.to_list(getattr(self, 'qt5_vars', [])) if not self.qt5_vars: dirlst = Utils.listdir(self.env.QTLIBS) pat = self.env.cxxshlib_PATTERN if Utils.is_win32: pat = pat.replace('.dll', '.lib') if self.environ.get('QT5_FORCE_STATIC'): pat = self.env.cxxstlib_PATTERN if Utils.unversioned_sys_platform() == 'darwin': pat = r"%s\.framework" re_qt = re.compile(pat%'Qt5?(?P.*)'+'$') for x in dirlst: m = re_qt.match(x) if m: self.qt5_vars.append("Qt5%s" % m.group('name')) if not self.qt5_vars: self.fatal('cannot find any Qt5 library (%r)' % self.env.QTLIBS) qtextralibs = getattr(Options.options, 'qtextralibs', None) if qtextralibs: self.qt5_vars.extend(qtextralibs.split(',')) @conf def set_qt5_defines(self): if sys.platform != 'win32': return for x in self.qt5_vars: y=x.replace('Qt5', 'Qt')[2:].upper() self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y) def options(opt): """ Command-line options """ opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') for i in 'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i, type='string', default='', dest=i) opt.add_option('--translate', action='store_true', help='collect translation strings', dest='trans_qt5', default=False) opt.add_option('--qtextralibs', type='string', default='', dest='qtextralibs', help='additional qt libraries on the system to add to default ones, comma separated') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/ruby.py0000660000000000000000000001271300000000000022044 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # daniel.svensson at purplescout.se 2008 # Thomas Nagy 2016-2018 (ita) """ Support for Ruby extensions. 
A C/C++ compiler is required:: def options(opt): opt.load('compiler_c ruby') def configure(conf): conf.load('compiler_c ruby') conf.check_ruby_version((1,8,0)) conf.check_ruby_ext_devel() conf.check_ruby_module('libxml') def build(bld): bld( features = 'c cshlib rubyext', source = 'rb_mytest.c', target = 'mytest_ext', install_path = '${ARCHDIR_RUBY}') bld.install_files('${LIBDIR_RUBY}', 'Mytest.rb') """ import os from waflib import Errors, Options, Task, Utils from waflib.TaskGen import before_method, feature, extension from waflib.Configure import conf @feature('rubyext') @before_method('apply_incpaths', 'process_source', 'apply_bundle', 'apply_link') def init_rubyext(self): """ Add required variables for ruby extensions """ self.install_path = '${ARCHDIR_RUBY}' self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'RUBY' in self.uselib: self.uselib.append('RUBY') if not 'RUBYEXT' in self.uselib: self.uselib.append('RUBYEXT') @feature('rubyext') @before_method('apply_link', 'propagate_uselib_vars') def apply_ruby_so_name(self): """ Strip the *lib* prefix from ruby extensions """ self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.rubyext_PATTERN @conf def check_ruby_version(self, minver=()): """ Checks if ruby is installed. If installed the variable RUBY will be set in environment. The ruby binary can be overridden by ``--with-ruby-binary`` command-line option. """ ruby = self.find_program('ruby', var='RUBY', value=Options.options.rubybinary) try: version = self.cmd_and_log(ruby + ['-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip() except Errors.WafError: self.fatal('could not determine ruby version') self.env.RUBY_VERSION = version try: ver = tuple(map(int, version.split('.'))) except Errors.WafError: self.fatal('unsupported ruby version %r' % version) cver = '' if minver: cver = '> ' + '.'.join(str(x) for x in minver) if ver < minver: self.fatal('ruby is too old %r' % ver) self.msg('Checking for ruby version %s' % cver, version) @conf def check_ruby_ext_devel(self): """ Check if a ruby extension can be created """ if not self.env.RUBY: self.fatal('ruby detection is required first') if not self.env.CC_NAME and not self.env.CXX_NAME: self.fatal('load a c/c++ compiler first') version = tuple(map(int, self.env.RUBY_VERSION.split("."))) def read_out(cmd): return Utils.to_list(self.cmd_and_log(self.env.RUBY + ['-rrbconfig', '-e', cmd])) def read_config(key): return read_out('puts RbConfig::CONFIG[%r]' % key) cpppath = archdir = read_config('archdir') if version >= (1, 9, 0): ruby_hdrdir = read_config('rubyhdrdir') cpppath += ruby_hdrdir if version >= (2, 0, 0): cpppath += read_config('rubyarchhdrdir') cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])] self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file', link_header_test=False) self.env.LIBPATH_RUBYEXT = read_config('libdir') self.env.LIBPATH_RUBYEXT += archdir self.env.INCLUDES_RUBYEXT = cpppath self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS') self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0] # ok this is really stupid, but the command and flags are combined. # so we try to find the first argument... 
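# e.g. RbConfig::CONFIG['LDSHARED'] commonly expands to something like
# 'gcc -shared'; the loop below drops the leading compiler-driver words and
# keeps everything from the first '-' argument onwards.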
flags = read_config('LDSHARED') while flags and flags[0][0] != '-': flags = flags[1:] # we also want to strip out the deprecated ppc flags if len(flags) > 1 and flags[1] == "ppc": flags = flags[2:] self.env.LINKFLAGS_RUBYEXT = flags self.env.LINKFLAGS_RUBYEXT += read_config('LIBS') self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED') if Options.options.rubyarchdir: self.env.ARCHDIR_RUBY = Options.options.rubyarchdir else: self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0] if Options.options.rubylibdir: self.env.LIBDIR_RUBY = Options.options.rubylibdir else: self.env.LIBDIR_RUBY = read_config('sitelibdir')[0] @conf def check_ruby_module(self, module_name): """ Check if the selected ruby interpreter can require the given ruby module:: def configure(conf): conf.check_ruby_module('libxml') :param module_name: module :type module_name: string """ self.start_msg('Ruby module %s' % module_name) try: self.cmd_and_log(self.env.RUBY + ['-e', 'require \'%s\';puts 1' % module_name]) except Errors.WafError: self.end_msg(False) self.fatal('Could not find the ruby module %r' % module_name) self.end_msg(True) @extension('.rb') def process(self, node): return self.create_task('run_ruby', node) class run_ruby(Task.Task): """ Task to run ruby files detected by file extension .rb:: def options(opt): opt.load('ruby') def configure(ctx): ctx.check_ruby_version() def build(bld): bld.env.RBFLAGS = '-e puts "hello world"' bld(source='a_ruby_file.rb') """ run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}' def options(opt): """ Add the ``--with-ruby-archdir``, ``--with-ruby-libdir`` and ``--with-ruby-binary`` options """ opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files') opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path') opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/suncc.py0000660000000000000000000000272300000000000022176 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_scc(conf): """ Detects the Sun C compiler """ v = conf.env cc = conf.find_program('cc', var='CC') try: conf.cmd_and_log(cc + ['-flags']) except Errors.WafError: conf.fatal('%r is not a Sun compiler' % cc) v.CC_NAME = 'sun' conf.get_suncc_version(cc) @conf def scc_common_flags(conf): """ Flags required for executing the sun C compiler """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o', ''] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o', ''] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Bdynamic' v.STLIB_MARKER = '-Bstatic' v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-xcode=pic32', '-DPIC'] v.LINKFLAGS_cshlib = ['-G'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = ['-Bstatic'] v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_scc() conf.find_ar() conf.scc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() 
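# Illustrative project usage (a sketch, not part of this module): the tool can
# be loaded explicitly when targeting the Sun/Oracle Studio C compiler, e.g.
#
#   def configure(conf):
#       conf.load('suncc')   # runs find_scc(), find_ar() and the flag setup above
#
# In practice the generic 'compiler_c' tool usually selects this tool
# automatically on Solaris systems.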
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/suncxx.py0000660000000000000000000000274700000000000022421 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_sxx(conf): """ Detects the sun C++ compiler """ v = conf.env cc = conf.find_program(['CC', 'c++'], var='CXX') try: conf.cmd_and_log(cc + ['-flags']) except Errors.WafError: conf.fatal('%r is not a Sun compiler' % cc) v.CXX_NAME = 'sun' conf.get_suncc_version(cc) @conf def sxx_common_flags(conf): """ Flags required for executing the sun C++ compiler """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o', ''] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o', ''] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Bdynamic' v.STLIB_MARKER = '-Bstatic' v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-xcode=pic32', '-DPIC'] v.LINKFLAGS_cxxshlib = ['-G'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = ['-Bstatic'] v.cxxstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_sxx() conf.find_ar() conf.sxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/tex.py0000660000000000000000000003573100000000000021670 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ TeX/LaTeX/PDFLaTeX/XeLaTeX support Example:: def configure(conf): conf.load('tex') if not conf.env.LATEX: conf.fatal('The program LaTex is required') def build(bld): bld( features = 'tex', type = 'latex', # pdflatex or xelatex source = 'document.ltx', # mandatory, the source outs = 'ps', # 'pdf' or 'ps pdf' deps = 'crossreferencing.lst', # to give dependencies directly prompt = 1, # 0 for the batch mode ) Notes: - To configure with a special program, use:: $ PDFLATEX=luatex waf configure - This tool does not use the target attribute of the task generator (``bld(target=...)``); the target file name is built from the source base name and the output type(s) """ import os, re from waflib import Utils, Task, Errors, Logs, Node from waflib.TaskGen import feature, before_method re_bibunit = re.compile(r'\\(?Pputbib)\[(?P[^\[\]]*)\]',re.M) def bibunitscan(self): """ Parses TeX inputs and try to find the *bibunit* file dependencies :return: list of bibunit files :rtype: list of :py:class:`waflib.Node.Node` """ node = self.inputs[0] nodes = [] if not node: return nodes code = node.read() for match in re_bibunit.finditer(code): path = match.group('file') if path: found = None for k in ('', '.bib'): # add another loop for the tex include paths? 
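# look for the path exactly as written and then with a '.bib' suffix,
# relative to the directory of the scanned TeX source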
Logs.debug('tex: trying %s%s', path, k) fi = node.parent.find_resource(path + k) if fi: found = True nodes.append(fi) # no break if not found: Logs.debug('tex: could not find %s', path) Logs.debug('tex: found the following bibunit files: %s', nodes) return nodes exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps', '.sty'] """List of typical file extensions included in latex files""" exts_tex = ['.ltx', '.tex'] """List of typical file extensions that contain latex""" re_tex = re.compile(r'\\(?Pusepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P[^{}]*)}',re.M) """Regexp for expressions that may include latex files""" g_bibtex_re = re.compile('bibdata', re.M) """Regexp for bibtex files""" g_glossaries_re = re.compile('\\@newglossary', re.M) """Regexp for expressions that create glossaries""" class tex(Task.Task): """ Compiles a tex/latex file. .. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex """ bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False) bibtex_fun.__doc__ = """ Execute the program **bibtex** """ makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False) makeindex_fun.__doc__ = """ Execute the program **makeindex** """ makeglossaries_fun, _ = Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}', shell=False) makeglossaries_fun.__doc__ = """ Execute the program **makeglossaries** """ def exec_command(self, cmd, **kw): """ Executes TeX commands without buffering (latex may prompt for inputs) :return: the return code :rtype: int """ if self.env.PROMPT_LATEX: # capture the outputs in configuration tests kw['stdout'] = kw['stderr'] = None return super(tex, self).exec_command(cmd, **kw) def scan_aux(self, node): """ Recursive regex-based scanner that finds included auxiliary files. """ nodes = [node] re_aux = re.compile(r'\\@input{(?P[^{}]*)}', re.M) def parse_node(node): code = node.read() for match in re_aux.finditer(code): path = match.group('file') found = node.parent.find_or_declare(path) if found and found not in nodes: Logs.debug('tex: found aux node %r', found) nodes.append(found) parse_node(found) parse_node(node) return nodes def scan(self): """ Recursive regex-based scanner that finds latex dependencies. 
It uses :py:attr:`waflib.Tools.tex.re_tex` Depending on your needs you might want: * to change re_tex:: from waflib.Tools import tex tex.re_tex = myregex * or to change the method scan from the latex tasks:: from waflib.Task import classes classes['latex'].scan = myscanfunction """ node = self.inputs[0] nodes = [] names = [] seen = [] if not node: return (nodes, names) def parse_node(node): if node in seen: return seen.append(node) code = node.read() for match in re_tex.finditer(code): multibib = match.group('type') if multibib and multibib.startswith('bibliography'): multibib = multibib[len('bibliography'):] if multibib.startswith('style'): continue else: multibib = None for path in match.group('file').split(','): if path: add_name = True found = None for k in exts_deps_tex: # issue 1067, scan in all texinputs folders for up in self.texinputs_nodes: Logs.debug('tex: trying %s%s', path, k) found = up.find_resource(path + k) if found: break for tsk in self.generator.tasks: if not found or found in tsk.outputs: break else: nodes.append(found) add_name = False for ext in exts_tex: if found.name.endswith(ext): parse_node(found) break # multibib stuff if found and multibib and found.name.endswith('.bib'): try: self.multibibs.append(found) except AttributeError: self.multibibs = [found] # no break, people are crazy if add_name: names.append(path) parse_node(node) for x in nodes: x.parent.get_bld().mkdir() Logs.debug("tex: found the following : %s and names %s", nodes, names) return (nodes, names) def check_status(self, msg, retcode): """ Checks an exit status and raise an error with a particular message :param msg: message to display if the code is non-zero :type msg: string :param retcode: condition :type retcode: boolean """ if retcode != 0: raise Errors.WafError('%r command exit status %r' % (msg, retcode)) def info(self, *k, **kw): try: info = self.generator.bld.conf.logger.info except AttributeError: info = Logs.info info(*k, **kw) def bibfile(self): """ Parses *.aux* files to find bibfiles to process. If present, execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun` """ for aux_node in self.aux_nodes: try: ct = aux_node.read() except EnvironmentError: Logs.error('Error reading %s: %r', aux_node.abspath()) continue if g_bibtex_re.findall(ct): self.info('calling bibtex') self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = aux_node.name[:-4] self.check_status('error when calling bibtex', self.bibtex_fun()) for node in getattr(self, 'multibibs', []): self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = node.name[:-4] self.check_status('error when calling bibtex', self.bibtex_fun()) def bibunits(self): """ Parses *.aux* file to find bibunit files. If there are bibunit files, runs :py:meth:`waflib.Tools.tex.tex.bibtex_fun`. """ try: bibunits = bibunitscan(self) except OSError: Logs.error('error bibunitscan') else: if bibunits: fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)] if fn: self.info('calling bibtex on bibunits') for f in fn: self.env.env = {'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()} self.env.SRCFILE = f self.check_status('error when calling bibtex', self.bibtex_fun()) def makeindex(self): """ Searches the filesystem for *.idx* files to process. 
If present, runs :py:meth:`waflib.Tools.tex.tex.makeindex_fun` """ self.idx_node = self.inputs[0].change_ext('.idx') try: idx_path = self.idx_node.abspath() os.stat(idx_path) except OSError: self.info('index file %s absent, not calling makeindex', idx_path) else: self.info('calling makeindex') self.env.SRCFILE = self.idx_node.name self.env.env = {} self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun()) def bibtopic(self): """ Lists additional .aux files from the bibtopic package """ p = self.inputs[0].parent.get_bld() if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')): self.aux_nodes += p.ant_glob('*[0-9].aux') def makeglossaries(self): """ Lists additional glossaries from .aux files. If present, runs the makeglossaries program. """ src_file = self.inputs[0].abspath() base_file = os.path.basename(src_file) base, _ = os.path.splitext(base_file) for aux_node in self.aux_nodes: try: ct = aux_node.read() except EnvironmentError: Logs.error('Error reading %s: %r', aux_node.abspath()) continue if g_glossaries_re.findall(ct): if not self.env.MAKEGLOSSARIES: raise Errors.WafError("The program 'makeglossaries' is missing!") Logs.warn('calling makeglossaries') self.env.SRCFILE = base self.check_status('error when calling makeglossaries %s' % base, self.makeglossaries_fun()) return def texinputs(self): """ Returns the list of texinput nodes as a string suitable for the TEXINPUTS environment variables :rtype: string """ return os.pathsep.join([k.abspath() for k in self.texinputs_nodes]) + os.pathsep def run(self): """ Runs the whole TeX build process Multiple passes are required depending on the usage of cross-references, bibliographies, glossaries, indexes and additional contents The appropriate TeX compiler is called until the *.aux* files stop changing. 
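A document with a bibliography therefore typically results in a sequence such as (illustrative)::

	pdflatex report.tex
	bibtex report
	pdflatex report.tex
	pdflatex report.tex

with at most ten extra compiler passes before giving up.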
""" env = self.env if not env.PROMPT_LATEX: env.append_value('LATEXFLAGS', '-interaction=batchmode') env.append_value('PDFLATEXFLAGS', '-interaction=batchmode') env.append_value('XELATEXFLAGS', '-interaction=batchmode') # important, set the cwd for everybody self.cwd = self.inputs[0].parent.get_bld() self.info('first pass on %s', self.__class__.__name__) # Hash .aux files before even calling the LaTeX compiler cur_hash = self.hash_aux_nodes() self.call_latex() # Find the .aux files again since bibtex processing can require it self.hash_aux_nodes() self.bibtopic() self.bibfile() self.bibunits() self.makeindex() self.makeglossaries() for i in range(10): # There is no need to call latex again if the .aux hash value has not changed prev_hash = cur_hash cur_hash = self.hash_aux_nodes() if not cur_hash: Logs.error('No aux.h to process') if cur_hash and cur_hash == prev_hash: break # run the command self.info('calling %s', self.__class__.__name__) self.call_latex() def hash_aux_nodes(self): """ Returns a hash of the .aux file contents :rtype: string or bytes """ try: self.aux_nodes except AttributeError: try: self.aux_nodes = self.scan_aux(self.inputs[0].change_ext('.aux')) except IOError: return None return Utils.h_list([Utils.h_file(x.abspath()) for x in self.aux_nodes]) def call_latex(self): """ Runs the TeX compiler once """ self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'TEXINPUTS': self.texinputs()}) self.env.SRCFILE = self.inputs[0].abspath() self.check_status('error when calling latex', self.texfun()) class latex(tex): "Compiles LaTeX files" texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) class pdflatex(tex): "Compiles PdfLaTeX files" texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) class xelatex(tex): "XeLaTeX files" texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) class dvips(Task.Task): "Converts dvi files to postscript" run_str = '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] class dvipdf(Task.Task): "Converts dvi files to pdf" run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] class pdf2ps(Task.Task): "Converts pdf files to postscript" run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] @feature('tex') @before_method('process_source') def apply_tex(self): """ Creates :py:class:`waflib.Tools.tex.tex` objects, and dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc). 
""" if not getattr(self, 'type', None) in ('latex', 'pdflatex', 'xelatex'): self.type = 'pdflatex' outs = Utils.to_list(getattr(self, 'outs', [])) # prompt for incomplete files (else the batchmode is used) try: self.generator.bld.conf except AttributeError: default_prompt = False else: default_prompt = True self.env.PROMPT_LATEX = getattr(self, 'prompt', default_prompt) deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for dep in deps: if isinstance(dep, str): n = self.path.find_resource(dep) if not n: self.bld.fatal('Could not find %r for %r' % (dep, self)) if not n in deps_lst: deps_lst.append(n) elif isinstance(dep, Node.Node): deps_lst.append(dep) for node in self.to_nodes(self.source): if self.type == 'latex': task = self.create_task('latex', node, node.change_ext('.dvi')) elif self.type == 'pdflatex': task = self.create_task('pdflatex', node, node.change_ext('.pdf')) elif self.type == 'xelatex': task = self.create_task('xelatex', node, node.change_ext('.pdf')) task.env = self.env # add the manual dependencies if deps_lst: for n in deps_lst: if not n in task.dep_nodes: task.dep_nodes.append(n) # texinputs is a nasty beast if hasattr(self, 'texinputs_nodes'): task.texinputs_nodes = self.texinputs_nodes else: task.texinputs_nodes = [node.parent, node.parent.get_bld(), self.path, self.path.get_bld()] lst = os.environ.get('TEXINPUTS', '') if self.env.TEXINPUTS: lst += os.pathsep + self.env.TEXINPUTS if lst: lst = lst.split(os.pathsep) for x in lst: if x: if os.path.isabs(x): p = self.bld.root.find_node(x) if p: task.texinputs_nodes.append(p) else: Logs.error('Invalid TEXINPUTS folder %s', x) else: Logs.error('Cannot resolve relative paths in TEXINPUTS %s', x) if self.type == 'latex': if 'ps' in outs: tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps')) tsk.env.env = dict(os.environ) if 'pdf' in outs: tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf')) tsk.env.env = dict(os.environ) elif self.type == 'pdflatex': if 'ps' in outs: self.create_task('pdf2ps', task.outputs, node.change_ext('.ps')) self.source = [] def configure(self): """ Find the programs tex, latex and others without raising errors. 
""" v = self.env for p in 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split(): try: self.find_program(p, var=p.upper()) except self.errors.ConfigurationError: pass v.DVIPSFLAGS = '-Ppdf' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/vala.py0000660000000000000000000002615500000000000022013 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 # Radosław Szkodziński, 2010 """ At this point, vala is still unstable, so do not expect this tool to be too stable either (apis, etc) """ import re from waflib import Build, Context, Errors, Logs, Node, Options, Task, Utils from waflib.TaskGen import extension, taskgen_method from waflib.Configure import conf class valac(Task.Task): """ Compiles vala files """ #run_str = "${VALAC} ${VALAFLAGS}" # ideally #vars = ['VALAC_VERSION'] vars = ["VALAC", "VALAC_VERSION", "VALAFLAGS"] ext_out = ['.h'] def run(self): cmd = self.env.VALAC + self.env.VALAFLAGS resources = getattr(self, 'vala_exclude', []) cmd.extend([a.abspath() for a in self.inputs if a not in resources]) ret = self.exec_command(cmd, cwd=self.vala_dir_node.abspath()) if ret: return ret if self.generator.dump_deps_node: self.generator.dump_deps_node.write('\n'.join(self.generator.packages)) return ret @taskgen_method def init_vala_task(self): """ Initializes the vala task with the relevant data (acts as a constructor) """ self.profile = getattr(self, 'profile', 'gobject') self.packages = packages = Utils.to_list(getattr(self, 'packages', [])) self.use = Utils.to_list(getattr(self, 'use', [])) if packages and not self.use: self.use = packages[:] # copy if self.profile == 'gobject': if not 'GOBJECT' in self.use: self.use.append('GOBJECT') def addflags(flags): self.env.append_value('VALAFLAGS', flags) if self.profile: addflags('--profile=%s' % self.profile) valatask = self.valatask # output directory if hasattr(self, 'vala_dir'): if isinstance(self.vala_dir, str): valatask.vala_dir_node = self.path.get_bld().make_node(self.vala_dir) try: valatask.vala_dir_node.mkdir() except OSError: raise self.bld.fatal('Cannot create the vala dir %r' % valatask.vala_dir_node) else: valatask.vala_dir_node = self.vala_dir else: valatask.vala_dir_node = self.path.get_bld() addflags('--directory=%s' % valatask.vala_dir_node.abspath()) if hasattr(self, 'thread'): if self.profile == 'gobject': if not 'GTHREAD' in self.use: self.use.append('GTHREAD') else: #Vala doesn't have threading support for dova nor posix Logs.warn('Profile %s means no threading support', self.profile) self.thread = False if self.thread: addflags('--thread') self.is_lib = 'cprogram' not in self.features if self.is_lib: addflags('--library=%s' % self.target) h_node = valatask.vala_dir_node.find_or_declare('%s.h' % self.target) valatask.outputs.append(h_node) addflags('--header=%s' % h_node.name) valatask.outputs.append(valatask.vala_dir_node.find_or_declare('%s.vapi' % self.target)) if getattr(self, 'gir', None): gir_node = valatask.vala_dir_node.find_or_declare('%s.gir' % self.gir) addflags('--gir=%s' % gir_node.name) valatask.outputs.append(gir_node) self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None)) if self.vala_target_glib: addflags('--target-glib=%s' % self.vala_target_glib) addflags(['--define=%s' % x for x in Utils.to_list(getattr(self, 'vala_defines', []))]) packages_private = 
Utils.to_list(getattr(self, 'packages_private', [])) addflags(['--pkg=%s' % x for x in packages_private]) def _get_api_version(): api_version = '1.0' if hasattr(Context.g_module, 'API_VERSION'): version = Context.g_module.API_VERSION.split(".") if version[0] == "0": api_version = "0." + version[1] else: api_version = version[0] + ".0" return api_version self.includes = Utils.to_list(getattr(self, 'includes', [])) valatask.install_path = getattr(self, 'install_path', '') valatask.vapi_path = getattr(self, 'vapi_path', '${DATAROOTDIR}/vala/vapi') valatask.pkg_name = getattr(self, 'pkg_name', self.env.PACKAGE) valatask.header_path = getattr(self, 'header_path', '${INCLUDEDIR}/%s-%s' % (valatask.pkg_name, _get_api_version())) valatask.install_binding = getattr(self, 'install_binding', True) self.vapi_dirs = vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', [])) #includes = [] if hasattr(self, 'use'): local_packages = Utils.to_list(self.use)[:] # make sure to have a copy seen = [] while len(local_packages) > 0: package = local_packages.pop() if package in seen: continue seen.append(package) # check if the package exists try: package_obj = self.bld.get_tgen_by_name(package) except Errors.WafError: continue # in practice the other task is already processed # but this makes it explicit package_obj.post() package_name = package_obj.target task = getattr(package_obj, 'valatask', None) if task: for output in task.outputs: if output.name == package_name + ".vapi": valatask.set_run_after(task) if package_name not in packages: packages.append(package_name) if output.parent not in vapi_dirs: vapi_dirs.append(output.parent) if output.parent not in self.includes: self.includes.append(output.parent) if hasattr(package_obj, 'use'): lst = self.to_list(package_obj.use) lst.reverse() local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages addflags(['--pkg=%s' % p for p in packages]) for vapi_dir in vapi_dirs: if isinstance(vapi_dir, Node.Node): v_node = vapi_dir else: v_node = self.path.find_dir(vapi_dir) if not v_node: Logs.warn('Unable to locate Vala API directory: %r', vapi_dir) else: addflags('--vapidir=%s' % v_node.abspath()) self.dump_deps_node = None if self.is_lib and self.packages: self.dump_deps_node = valatask.vala_dir_node.find_or_declare('%s.deps' % self.target) valatask.outputs.append(self.dump_deps_node) if self.is_lib and valatask.install_binding: headers_list = [o for o in valatask.outputs if o.suffix() == ".h"] if headers_list: self.install_vheader = self.add_install_files(install_to=valatask.header_path, install_from=headers_list) vapi_list = [o for o in valatask.outputs if (o.suffix() in (".vapi", ".deps"))] if vapi_list: self.install_vapi = self.add_install_files(install_to=valatask.vapi_path, install_from=vapi_list) gir_list = [o for o in valatask.outputs if o.suffix() == '.gir'] if gir_list: self.install_gir = self.add_install_files( install_to=getattr(self, 'gir_path', '${DATAROOTDIR}/gir-1.0'), install_from=gir_list) if hasattr(self, 'vala_resources'): nodes = self.to_nodes(self.vala_resources) valatask.vala_exclude = getattr(valatask, 'vala_exclude', []) + nodes valatask.inputs.extend(nodes) for x in nodes: addflags(['--gresources', x.abspath()]) @extension('.vala', '.gs') def vala_file(self, node): """ Compile a vala file and bind the task to *self.valatask*. If an existing vala task is already set, add the node to its inputs. 
The typical example is:: def build(bld): bld.program( packages = 'gtk+-2.0', target = 'vala-gtk-example', use = 'GTK GLIB', source = 'vala-gtk-example.vala foo.vala', vala_defines = ['DEBUG'] # adds --define= values to the command-line # the following arguments are for libraries #gir = 'hello-1.0', #gir_path = '/tmp', #vapi_path = '/tmp', #pkg_name = 'hello' # disable installing of gir, vapi and header #install_binding = False # profile = 'xyz' # adds --profile= to enable profiling # thread = True, # adds --thread, except if profile is on or not on 'gobject' # vala_target_glib = 'xyz' # adds --target-glib=, can be given through the command-line option --vala-target-glib= ) :param node: vala file :type node: :py:class:`waflib.Node.Node` """ try: valatask = self.valatask except AttributeError: valatask = self.valatask = self.create_task('valac') self.init_vala_task() valatask.inputs.append(node) name = node.name[:node.name.rfind('.')] + '.c' c_node = valatask.vala_dir_node.find_or_declare(name) valatask.outputs.append(c_node) self.source.append(c_node) @extension('.vapi') def vapi_file(self, node): try: valatask = self.valatask except AttributeError: valatask = self.valatask = self.create_task('valac') self.init_vala_task() valatask.inputs.append(node) @conf def find_valac(self, valac_name, min_version): """ Find the valac program, and execute it to store the version number in *conf.env.VALAC_VERSION* :param valac_name: program name :type valac_name: string or list of string :param min_version: minimum version acceptable :type min_version: tuple of int """ valac = self.find_program(valac_name, var='VALAC') try: output = self.cmd_and_log(valac + ['--version']) except Errors.WafError: valac_version = None else: ver = re.search(r'\d+.\d+.\d+', output).group().split('.') valac_version = tuple([int(x) for x in ver]) self.msg('Checking for %s version >= %r' % (valac_name, min_version), valac_version, valac_version and valac_version >= min_version) if valac and valac_version < min_version: self.fatal("%s version %r is too old, need >= %r" % (valac_name, valac_version, min_version)) self.env.VALAC_VERSION = valac_version return valac @conf def check_vala(self, min_version=(0,8,0), branch=None): """ Check if vala compiler from a given branch exists of at least a given version. :param min_version: minimum version acceptable (0.8.0) :type min_version: tuple :param branch: first part of the version number, in case a snapshot is used (0, 8) :type branch: tuple of int """ if self.env.VALA_MINVER: min_version = self.env.VALA_MINVER if self.env.VALA_MINVER_BRANCH: branch = self.env.VALA_MINVER_BRANCH if not branch: branch = min_version[:2] try: find_valac(self, 'valac-%d.%d' % (branch[0], branch[1]), min_version) except self.errors.ConfigurationError: find_valac(self, 'valac', min_version) @conf def check_vala_deps(self): """ Load the gobject and gthread packages if they are missing. 
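This amounts to pkg-config checks such as::

	conf.check_cfg(package='gobject-2.0', uselib_store='GOBJECT', args='--cflags --libs')

with an *atleast_version* constraint added when ``--vala-target-glib`` is given.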
""" if not self.env.HAVE_GOBJECT: pkg_args = {'package': 'gobject-2.0', 'uselib_store': 'GOBJECT', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib self.check_cfg(**pkg_args) if not self.env.HAVE_GTHREAD: pkg_args = {'package': 'gthread-2.0', 'uselib_store': 'GTHREAD', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib self.check_cfg(**pkg_args) def configure(self): """ Use the following to enforce minimum vala version:: def configure(conf): conf.env.VALA_MINVER = (0, 10, 0) conf.load('vala') """ self.load('gnu_dirs') self.check_vala_deps() self.check_vala() self.add_os_flags('VALAFLAGS') self.env.append_unique('VALAFLAGS', ['-C']) def options(opt): """ Load the :py:mod:`waflib.Tools.gnu_dirs` tool and add the ``--vala-target-glib`` command-line option """ opt.load('gnu_dirs') valaopts = opt.add_option_group('Vala Compiler Options') valaopts.add_option('--vala-target-glib', default=None, dest='vala_target_glib', metavar='MAJOR.MINOR', help='Target version of glib for Vala GObject code generation') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/Tools/waf_unit_test.py0000660000000000000000000002313100000000000023732 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 # Thomas Nagy, 2010-2018 (ita) """ Unit testing system for C/C++/D and interpreted languages providing test execution: * in parallel, by using ``waf -j`` * partial (only the tests that have changed) or full (by using ``waf --alltests``) The tests are declared by adding the **test** feature to programs:: def options(opt): opt.load('compiler_cxx waf_unit_test') def configure(conf): conf.load('compiler_cxx waf_unit_test') def build(bld): bld(features='cxx cxxprogram test', source='main.cpp', target='app') # or bld.program(features='test', source='main2.cpp', target='app2') When the build is executed, the program 'test' will be built and executed without arguments. The success/failure is detected by looking at the return code. The status and the standard output/error are stored on the build context. The results can be displayed by registering a callback function. Here is how to call the predefined callback:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.summary) By passing --dump-test-scripts the build outputs corresponding python files (with extension _run.py) that are useful for debugging purposes. """ import os, shlex, sys from waflib.TaskGen import feature, after_method, taskgen_method from waflib import Utils, Task, Logs, Options from waflib.Tools import ccroot testlock = Utils.threading.Lock() SCRIPT_TEMPLATE = """#! %(python)s import subprocess, sys cmd = %(cmd)r # if you want to debug with gdb: #cmd = ['gdb', '-args'] + cmd env = %(env)r status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str)) sys.exit(status) """ @taskgen_method def handle_ut_cwd(self, key): """ Task generator method, used internally to limit code duplication. This method may disappear anytime. 
""" cwd = getattr(self, key, None) if cwd: if isinstance(cwd, str): # we want a Node instance if os.path.isabs(cwd): self.ut_cwd = self.bld.root.make_node(cwd) else: self.ut_cwd = self.path.make_node(cwd) @feature('test_scripts') def make_interpreted_test(self): """Create interpreted unit tests.""" for x in ['test_scripts_source', 'test_scripts_template']: if not hasattr(self, x): Logs.warn('a test_scripts taskgen i missing %s' % x) return self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False)) script_nodes = self.to_nodes(self.test_scripts_source) for script_node in script_nodes: tsk = self.create_task('utest', [script_node]) tsk.vars = lst + tsk.vars tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd()) self.handle_ut_cwd('test_scripts_cwd') env = getattr(self, 'test_scripts_env', None) if env: self.ut_env = env else: self.ut_env = dict(os.environ) paths = getattr(self, 'test_scripts_paths', {}) for (k,v) in paths.items(): p = self.ut_env.get(k, '').split(os.pathsep) if isinstance(v, str): v = v.split(os.pathsep) self.ut_env[k] = os.pathsep.join(p + v) self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env]) @feature('test') @after_method('apply_link', 'process_use') def make_test(self): """Create the unit test task. There can be only one unit test task by task generator.""" if not getattr(self, 'link_task', None): return tsk = self.create_task('utest', self.link_task.outputs) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = tsk.vars + lst self.env.append_value('UT_DEPS', self.ut_str) self.handle_ut_cwd('ut_cwd') if not hasattr(self, 'ut_paths'): paths = [] for x in self.tmp_use_sorted: try: y = self.bld.get_tgen_by_name(x).link_task except AttributeError: pass else: if not isinstance(y, ccroot.stlink_task): paths.append(y.outputs[0].parent.abspath()) self.ut_paths = os.pathsep.join(paths) + os.pathsep if not hasattr(self, 'ut_env'): self.ut_env = dct = dict(os.environ) def add_path(var): dct[var] = self.ut_paths + dct.get(var,'') if Utils.is_win32: add_path('PATH') elif Utils.unversioned_sys_platform() == 'darwin': add_path('DYLD_LIBRARY_PATH') add_path('LD_LIBRARY_PATH') else: add_path('LD_LIBRARY_PATH') if not hasattr(self, 'ut_cmd'): self.ut_cmd = getattr(Options.options, 'testcmd', False) self.env.append_value('UT_DEPS', str(self.ut_cmd)) self.env.append_value('UT_DEPS', self.ut_paths) self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env]) @taskgen_method def add_test_results(self, tup): """Override and return tup[1] to interrupt the build immediately if a test does not run""" Logs.debug("ut: %r", tup) try: self.utest_results.append(tup) except AttributeError: self.utest_results = [tup] try: self.bld.utest_results.append(tup) except AttributeError: self.bld.utest_results = [tup] @Task.deep_inputs class utest(Task.Task): """ Execute a unit test """ color = 'PINK' after = ['vnum', 'inst'] vars = ['UT_DEPS'] def runnable_status(self): """ Always execute the task if `waf --alltests` was used or no tests if ``waf --notests`` was used """ if getattr(Options.options, 'no_tests', False): return Task.SKIP_ME ret = super(utest, self).runnable_status() if ret == Task.SKIP_ME: if getattr(Options.options, 'all_tests', False): return Task.RUN_ME return ret def get_test_env(self): """ In general, tests may require any library built anywhere in the project. 
Override this method if fewer paths are needed """ return self.generator.ut_env def post_run(self): super(utest, self).post_run() if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]: self.generator.bld.task_sigs[self.uid()] = None def run(self): """ Execute the test. The execution is always successful, and the results are stored on ``self.generator.bld.utest_results`` for postprocessing. Override ``add_test_results`` to interrupt the build """ if hasattr(self.generator, 'ut_run'): return self.generator.ut_run(self) self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()]) ut_cmd = getattr(self.generator, 'ut_cmd', False) if ut_cmd: self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec)) return self.exec_command(self.ut_exec) def exec_command(self, cmd, **kw): self.generator.bld.log_command(cmd, kw) if getattr(Options.options, 'dump_test_scripts', False): script_code = SCRIPT_TEMPLATE % { 'python': sys.executable, 'env': self.get_test_env(), 'cwd': self.get_cwd().abspath(), 'cmd': cmd } script_file = self.inputs[0].abspath() + '_run.py' Utils.writef(script_file, script_code, encoding='utf-8') os.chmod(script_file, Utils.O755) if Logs.verbose > 1: Logs.info('Test debug file written as %r' % script_file) proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(), stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str)) (stdout, stderr) = proc.communicate() self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr) testlock.acquire() try: return self.generator.add_test_results(tup) finally: testlock.release() def get_cwd(self): return getattr(self.generator, 'ut_cwd', self.inputs[0].parent) def summary(bld): """ Display an execution summary:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.summary) """ lst = getattr(bld, 'utest_results', []) if lst: Logs.pprint('CYAN', 'execution summary') total = len(lst) tfail = len([x for x in lst if x[1]]) Logs.pprint('GREEN', ' tests that pass %d/%d' % (total-tfail, total)) for (f, code, out, err) in lst: if not code: Logs.pprint('GREEN', ' %s' % f) Logs.pprint('GREEN' if tfail == 0 else 'RED', ' tests that fail %d/%d' % (tfail, total)) for (f, code, out, err) in lst: if code: Logs.pprint('RED', ' %s' % f) def set_exit_code(bld): """ If any of the tests fail waf will exit with that exit code. This is useful if you have an automated build system which need to report on errors from the tests. You may use it like this: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.set_exit_code) """ lst = getattr(bld, 'utest_results', []) for (f, code, out, err) in lst: if code: msg = [] if out: msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8'))) if err: msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8'))) bld.fatal(os.linesep.join(msg)) def options(opt): """ Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options. 
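For example::

	waf --alltests
	waf --testcmd="valgrind --error-exitcode=1 %s"
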
""" opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests') opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests') opt.add_option('--clear-failed', action='store_true', default=False, help='Force failed unit tests to run again next time', dest='clear_failed_tests') opt.add_option('--testcmd', action='store', default=False, dest='testcmd', help='Run the unit tests using the test-cmd string example "--testcmd="valgrind --error-exitcode=1 %s" to run under valgrind') opt.add_option('--dump-test-scripts', action='store_true', default=False, help='Create python scripts to help debug tests', dest='dump_test_scripts') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/winres.py0000660000000000000000000000414100000000000022366 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Brant Young, 2007 "Process *.rc* files for C/C++: X{.rc -> [.res|.rc.o]}" import re from waflib import Task from waflib.TaskGen import extension from waflib.Tools import c_preproc @extension('.rc') def rc_file(self, node): """ Binds the .rc extension to a winrc task """ obj_ext = '.rc.o' if self.env.WINRC_TGT_F == '/fo': obj_ext = '.res' rctask = self.create_task('winrc', node, node.change_ext(obj_ext)) try: self.compiled_tasks.append(rctask) except AttributeError: self.compiled_tasks = [rctask] re_lines = re.compile( r'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\ r'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)', re.IGNORECASE | re.MULTILINE) class rc_parser(c_preproc.c_parser): """ Calculates dependencies in .rc files """ def filter_comments(self, node): """ Overrides :py:meth:`waflib.Tools.c_preproc.c_parser.filter_comments` """ code = node.read() if c_preproc.use_trigraphs: for (a, b) in c_preproc.trig_def: code = code.split(a).join(b) code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) ret = [] for m in re.finditer(re_lines, code): if m.group(2): ret.append((m.group(2), m.group(3))) else: ret.append(('include', m.group(5))) return ret class winrc(Task.Task): """ Compiles resource files """ run_str = '${WINRC} ${WINRCFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}' color = 'BLUE' def scan(self): tmp = rc_parser(self.generator.includes_nodes) tmp.start(self.inputs[0], self.env) return (tmp.nodes, tmp.names) def configure(conf): """ Detects the programs RC or windres, depending on the C/C++ compiler in use """ v = conf.env if not v.WINRC: if v.CC_NAME == 'msvc': conf.find_program('RC', var='WINRC', path_list=v.PATH) v.WINRC_TGT_F = '/fo' v.WINRC_SRC_F = '' else: conf.find_program('windres', var='WINRC', path_list=v.PATH) v.WINRC_TGT_F = '-o' v.WINRC_SRC_F = '-i' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/xlc.py0000660000000000000000000000264100000000000021650 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_xlc(conf): """ Detects the Aix C compiler """ cc = conf.find_program(['xlc_r', 
'xlc'], var='CC') conf.get_xlc_version(cc) conf.env.CC_NAME = 'xlc' @conf def xlc_common_flags(conf): """ Flags required for executing the Aix C compiler """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o'] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = [] v.CCLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.LINKFLAGS_cprogram = ['-Wl,-brtl'] v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-G', '-Wl,-brtl,-bexpfull'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = [] v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_xlc() conf.find_ar() conf.xlc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/Tools/xlcxx.py0000660000000000000000000000267400000000000022236 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_xlcxx(conf): """ Detects the Aix C++ compiler """ cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX') conf.get_xlc_version(cxx) conf.env.CXX_NAME = 'xlc++' @conf def xlcxx_common_flags(conf): """ Flags required for executing the Aix C++ compiler """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o'] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-G', '-Wl,-brtl,-bexpfull'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = [] v.cxxstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_xlcxx() conf.find_ar() conf.xlcxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/Utils.py0000660000000000000000000006140300000000000021063 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Utilities and platform-specific fixes The portability fixes try to provide a consistent behavior of the Waf API through Python versions 2.5 to 3.X and across different platforms (win32, linux, etc) """ from __future__ import with_statement import atexit, os, sys, errno, inspect, re, datetime, platform, base64, signal, functools, time try: import cPickle except ImportError: import pickle as cPickle # leave this if os.name == 'posix' and sys.version_info[0] < 3: try: import subprocess32 as subprocess except ImportError: import subprocess else: import subprocess try: TimeoutExpired = subprocess.TimeoutExpired except AttributeError: class TimeoutExpired(Exception): pass from collections import deque, defaultdict try: import _winreg as winreg except ImportError: try: import winreg 
except ImportError: winreg = None from waflib import Errors try: from hashlib import md5 except ImportError: try: from hashlib import sha1 as md5 except ImportError: # never fail to enable potential fixes from another module pass else: try: md5().digest() except ValueError: # Fips? #2213 from hashlib import sha1 as md5 try: import threading except ImportError: if not 'JOBS' in os.environ: # no threading :-( os.environ['JOBS'] = '1' class threading(object): """ A fake threading class for platforms lacking the threading module. Use ``waf -j1`` on those platforms """ pass class Lock(object): """Fake Lock class""" def acquire(self): pass def release(self): pass threading.Lock = threading.Thread = Lock SIG_NIL = 'SIG_NIL_SIG_NIL_'.encode() """Arbitrary null value for hashes. Modify this value according to the hash function in use""" O644 = 420 """Constant representing the permissions for regular files (0644 raises a syntax error on python 3)""" O755 = 493 """Constant representing the permissions for executable files (0755 raises a syntax error on python 3)""" rot_chr = ['\\', '|', '/', '-'] "List of characters to use when displaying the throbber (progress bar)" rot_idx = 0 "Index of the current throbber character (progress bar)" class ordered_iter_dict(dict): """Ordered dictionary that provides iteration from the most recently inserted keys first""" def __init__(self, *k, **kw): self.lst = deque() dict.__init__(self, *k, **kw) def clear(self): dict.clear(self) self.lst = deque() def __setitem__(self, key, value): if key in dict.keys(self): self.lst.remove(key) dict.__setitem__(self, key, value) self.lst.append(key) def __delitem__(self, key): dict.__delitem__(self, key) try: self.lst.remove(key) except ValueError: pass def __iter__(self): return reversed(self.lst) def keys(self): return reversed(self.lst) class lru_node(object): """ Used by :py:class:`waflib.Utils.lru_cache` """ __slots__ = ('next', 'prev', 'key', 'val') def __init__(self): self.next = self self.prev = self self.key = None self.val = None class lru_cache(object): """ A simple least-recently used cache with lazy allocation """ __slots__ = ('maxlen', 'table', 'head') def __init__(self, maxlen=100): self.maxlen = maxlen """ Maximum amount of elements in the cache """ self.table = {} """ Mapping key-value """ self.head = lru_node() self.head.next = self.head self.head.prev = self.head def __getitem__(self, key): node = self.table[key] # assert(key==node.key) if node is self.head: return node.val # detach the node found node.prev.next = node.next node.next.prev = node.prev # replace the head node.next = self.head.next node.prev = self.head self.head = node.next.prev = node.prev.next = node return node.val def __setitem__(self, key, val): if key in self.table: # update the value for an existing key node = self.table[key] node.val = val self.__getitem__(key) else: if len(self.table) < self.maxlen: # the very first item is unused until the maximum is reached node = lru_node() node.prev = self.head node.next = self.head.next node.prev.next = node.next.prev = node else: node = self.head = self.head.next try: # that's another key del self.table[node.key] except KeyError: pass node.key = key node.val = val self.table[key] = node class lazy_generator(object): def __init__(self, fun, params): self.fun = fun self.params = params def __iter__(self): return self def __next__(self): try: it = self.it except AttributeError: it = self.it = self.fun(*self.params) return next(it) next = __next__ is_win32 = os.sep == '\\' or sys.platform == 'win32' 
or os.name == 'nt' # msys2 """ Whether this system is a Windows series """ def readf(fname, m='r', encoding='latin-1'): """ Reads an entire file into a string. See also :py:meth:`waflib.Node.Node.readf`:: def build(ctx): from waflib import Utils txt = Utils.readf(self.path.find_node('wscript').abspath()) txt = ctx.path.find_node('wscript').read() :type fname: string :param fname: Path to file :type m: string :param m: Open mode :type encoding: string :param encoding: encoding value, only used for python 3 :rtype: string :return: Content of the file """ if sys.hexversion > 0x3000000 and not 'b' in m: m += 'b' with open(fname, m) as f: txt = f.read() if encoding: txt = txt.decode(encoding) else: txt = txt.decode() else: with open(fname, m) as f: txt = f.read() return txt def writef(fname, data, m='w', encoding='latin-1'): """ Writes an entire file from a string. See also :py:meth:`waflib.Node.Node.writef`:: def build(ctx): from waflib import Utils txt = Utils.writef(self.path.make_node('i_like_kittens').abspath(), 'some data') self.path.make_node('i_like_kittens').write('some data') :type fname: string :param fname: Path to file :type data: string :param data: The contents to write to the file :type m: string :param m: Open mode :type encoding: string :param encoding: encoding value, only used for python 3 """ if sys.hexversion > 0x3000000 and not 'b' in m: data = data.encode(encoding) m += 'b' with open(fname, m) as f: f.write(data) def h_file(fname): """ Computes a hash value for a file by using md5. Use the md5_tstamp extension to get faster build hashes if necessary. :type fname: string :param fname: path to the file to hash :return: hash of the file contents :rtype: string or bytes """ m = md5() with open(fname, 'rb') as f: while fname: fname = f.read(200000) m.update(fname) return m.digest() def readf_win32(f, m='r', encoding='latin-1'): flags = os.O_NOINHERIT | os.O_RDONLY if 'b' in m: flags |= os.O_BINARY if '+' in m: flags |= os.O_RDWR try: fd = os.open(f, flags) except OSError: raise IOError('Cannot read from %r' % f) if sys.hexversion > 0x3000000 and not 'b' in m: m += 'b' with os.fdopen(fd, m) as f: txt = f.read() if encoding: txt = txt.decode(encoding) else: txt = txt.decode() else: with os.fdopen(fd, m) as f: txt = f.read() return txt def writef_win32(f, data, m='w', encoding='latin-1'): if sys.hexversion > 0x3000000 and not 'b' in m: data = data.encode(encoding) m += 'b' flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT if 'b' in m: flags |= os.O_BINARY if '+' in m: flags |= os.O_RDWR try: fd = os.open(f, flags) except OSError: raise OSError('Cannot write to %r' % f) with os.fdopen(fd, m) as f: f.write(data) def h_file_win32(fname): try: fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT) except OSError: raise OSError('Cannot read from %r' % fname) m = md5() with os.fdopen(fd, 'rb') as f: while fname: fname = f.read(200000) m.update(fname) return m.digest() # always save these readf_unix = readf writef_unix = writef h_file_unix = h_file if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000: # replace the default functions readf = readf_win32 writef = writef_win32 h_file = h_file_win32 try: x = ''.encode('hex') except LookupError: import binascii def to_hex(s): ret = binascii.hexlify(s) if not isinstance(ret, str): ret = ret.decode('utf-8') return ret else: def to_hex(s): return s.encode('hex') to_hex.__doc__ = """ Return the hexadecimal representation of a string :param s: string to convert :type s: string """ def listdir_win32(s): """ Lists the 
contents of a folder in a portable manner. On Win32, returns the list of drive letters: ['C:', 'X:', 'Z:'] when an empty string is given. :type s: string :param s: a string, which can be empty on Windows """ if not s: try: import ctypes except ImportError: # there is nothing much we can do return [x + ':\\' for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'] else: dlen = 4 # length of "?:\\x00" maxdrives = 26 buf = ctypes.create_string_buffer(maxdrives * dlen) ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf)) return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ] if len(s) == 2 and s[1] == ":": s += os.sep if not os.path.isdir(s): e = OSError('%s is not a directory' % s) e.errno = errno.ENOENT raise e return os.listdir(s) listdir = os.listdir if is_win32: listdir = listdir_win32 def num2ver(ver): """ Converts a string, tuple or version number into an integer. The number is supposed to have at most 4 digits:: from waflib.Utils import num2ver num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0)) :type ver: string or tuple of numbers :param ver: a version number """ if isinstance(ver, str): ver = tuple(ver.split('.')) if isinstance(ver, tuple): ret = 0 for i in range(4): if i < len(ver): ret += 256**(3 - i) * int(ver[i]) return ret return ver def to_list(val): """ Converts a string argument to a list by splitting it by spaces. Returns the object if not a string:: from waflib.Utils import to_list lst = to_list('a b c d') :param val: list of string or space-separated string :rtype: list :return: Argument converted to list """ if isinstance(val, str): return val.split() else: return val def console_encoding(): try: import ctypes except ImportError: pass else: try: codepage = ctypes.windll.kernel32.GetConsoleCP() except AttributeError: pass else: if codepage: return 'cp%d' % codepage return sys.stdout.encoding or ('cp1252' if is_win32 else 'latin-1') def split_path_unix(path): return path.split('/') def split_path_cygwin(path): if path.startswith('//'): ret = path.split('/')[2:] ret[0] = '/' + ret[0] return ret return path.split('/') re_sp = re.compile('[/\\\\]+') def split_path_win32(path): if path.startswith('\\\\'): ret = re_sp.split(path)[1:] ret[0] = '\\\\' + ret[0] if ret[0] == '\\\\?': return ret[1:] return ret return re_sp.split(path) msysroot = None def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): # msys paths can be in the form /usr/bin global msysroot if not msysroot: # msys has python 2.7 or 3, so we can use this msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: # Consider this an MSYSTEM environment if $MSYSTEM is set and python # reports is executable from a unix like path on a windows host. if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ = """ Splits a path by / or \\; do not confuse this function with with ``os.path.split`` :type path: string :param path: path to split :return: list of string """ def check_dir(path): """ Ensures that a directory exists (similar to ``mkdir -p``). :type path: string :param path: Path to directory :raises: :py:class:`waflib.Errors.WafError` if the folder cannot be added. 
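For example (illustrative path)::

	from waflib import Utils
	Utils.check_dir('build/output/reports')

creates the intermediate folders when they do not exist yet.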
""" if not os.path.isdir(path): try: os.makedirs(path) except OSError as e: if not os.path.isdir(path): raise Errors.WafError('Cannot create the folder %r' % path, ex=e) def check_exe(name, env=None): """ Ensures that a program exists :type name: string :param name: path to the program :param env: configuration object :type env: :py:class:`waflib.ConfigSet.ConfigSet` :return: path of the program or None :raises: :py:class:`waflib.Errors.WafError` if the folder cannot be added. """ if not name: raise ValueError('Cannot execute an empty string!') def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(name) if fpath and is_exe(name): return os.path.abspath(name) else: env = env or os.environ for path in env['PATH'].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, name) if is_exe(exe_file): return os.path.abspath(exe_file) return None def def_attrs(cls, **kw): """ Sets default attributes on a class instance :type cls: class :param cls: the class to update the given attributes in. :type kw: dict :param kw: dictionary of attributes names and values. """ for k, v in kw.items(): if not hasattr(cls, k): setattr(cls, k, v) def quote_define_name(s): """ Converts a string into an identifier suitable for C defines. :type s: string :param s: String to convert :rtype: string :return: Identifier suitable for C defines """ fu = re.sub('[^a-zA-Z0-9]', '_', s) fu = re.sub('_+', '_', fu) fu = fu.upper() return fu re_sh = re.compile('\\s|\'|"') """ Regexp used for shell_escape below """ def shell_escape(cmd): """ Escapes a command: ['ls', '-l', 'arg space'] -> ls -l 'arg space' """ if isinstance(cmd, str): return cmd return ' '.join(repr(x) if re_sh.search(x) else x for x in cmd) def h_list(lst): """ Hashes lists of ordered data. Using hash(tup) for tuples would be much more efficient, but Python now enforces hash randomization :param lst: list to hash :type lst: list of strings :return: hash of the list """ return md5(repr(lst).encode()).digest() if sys.hexversion < 0x3000000: def h_list_python2(lst): return md5(repr(lst)).digest() h_list_python2.__doc__ = h_list.__doc__ h_list = h_list_python2 def h_fun(fun): """ Hash functions :param fun: function to hash :type fun: function :return: hash of the function :rtype: string or bytes """ try: return fun.code except AttributeError: if isinstance(fun, functools.partial): code = list(fun.args) # The method items() provides a sequence of tuples where the first element # represents an optional argument of the partial function application # # The sorting result outcome will be consistent because: # 1. tuples are compared in order of their elements # 2. 
optional argument namess are unique code.extend(sorted(fun.keywords.items())) code.append(h_fun(fun.func)) fun.code = h_list(code) return fun.code try: h = inspect.getsource(fun) except EnvironmentError: h = 'nocode' try: fun.code = h except AttributeError: pass return h def h_cmd(ins): """ Hashes objects recursively :param ins: input object :type ins: string or list or tuple or function :rtype: string or bytes """ # this function is not meant to be particularly fast if isinstance(ins, str): # a command is either a string ret = ins elif isinstance(ins, list) or isinstance(ins, tuple): # or a list of functions/strings ret = str([h_cmd(x) for x in ins]) else: # or just a python function ret = str(h_fun(ins)) if sys.hexversion > 0x3000000: ret = ret.encode('latin-1', 'xmlcharrefreplace') return ret reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}") def subst_vars(expr, params): """ Replaces ${VAR} with the value of VAR taken from a dict or a config set:: from waflib import Utils s = Utils.subst_vars('${PREFIX}/bin', env) :type expr: string :param expr: String to perform substitution on :param params: Dictionary or config set to look up variable values. """ def repl_var(m): if m.group(1): return '\\' if m.group(2): return '$' try: # ConfigSet instances may contain lists return params.get_flat(m.group(3)) except AttributeError: return params[m.group(3)] # if you get a TypeError, it means that 'expr' is not a string... # Utils.subst_vars(None, env) will not work return reg_subst.sub(repl_var, expr) def destos_to_binfmt(key): """ Returns the binary format based on the unversioned platform name, and defaults to ``elf`` if nothing is found. :param key: platform name :type key: string :return: string representing the binary format """ if key == 'darwin': return 'mac-o' elif key in ('win32', 'cygwin', 'uwin', 'msys'): return 'pe' return 'elf' def unversioned_sys_platform(): """ Returns the unversioned platform name. Some Python platform names contain versions, that depend on the build environment, e.g. linux2, freebsd6, etc. This returns the name without the version number. Exceptions are os2 and win32, which are returned verbatim. :rtype: string :return: Unversioned platform name """ s = sys.platform if s.startswith('java'): # The real OS is hidden under the JVM. from java.lang import System s = System.getProperty('os.name') # see http://lopica.sourceforge.net/os.html for a list of possible values if s == 'Mac OS X': return 'darwin' elif s.startswith('Windows '): return 'win32' elif s == 'OS/2': return 'os2' elif s == 'HP-UX': return 'hp-ux' elif s in ('SunOS', 'Solaris'): return 'sunos' else: s = s.lower() # powerpc == darwin for our purposes if s == 'powerpc': return 'darwin' if s == 'win32' or s == 'os2': return s if s == 'cli' and os.name == 'nt': # ironpython is only on windows as far as we know return 'win32' return re.split(r'\d+$', s)[0] def nada(*k, **kw): """ Does nothing :return: None """ pass class Timer(object): """ Simple object for timing the execution of commands. 
Its string representation is the duration:: from waflib.Utils import Timer timer = Timer() a_few_operations() s = str(timer) """ def __init__(self): self.start_time = self.now() def __str__(self): delta = self.now() - self.start_time if not isinstance(delta, datetime.timedelta): delta = datetime.timedelta(seconds=delta) days = delta.days hours, rem = divmod(delta.seconds, 3600) minutes, seconds = divmod(rem, 60) seconds += delta.microseconds * 1e-6 result = '' if days: result += '%dd' % days if days or hours: result += '%dh' % hours if days or hours or minutes: result += '%dm' % minutes return '%s%.3fs' % (result, seconds) def now(self): return datetime.datetime.utcnow() if hasattr(time, 'perf_counter'): def now(self): return time.perf_counter() def read_la_file(path): """ Reads property files, used by msvc.py :param path: file to read :type path: string """ sp = re.compile(r'^([^=]+)=\'(.*)\'$') dc = {} for line in readf(path).splitlines(): try: _, left, right, _ = sp.split(line.strip()) dc[left] = right except ValueError: pass return dc def run_once(fun): """ Decorator: let a function cache its results, use like this:: @run_once def foo(k): return 345*2343 .. note:: in practice this can cause memory leaks, prefer a :py:class:`waflib.Utils.lru_cache` :param fun: function to execute :type fun: function :return: the return value of the function executed """ cache = {} def wrap(*k): try: return cache[k] except KeyError: ret = fun(*k) cache[k] = ret return ret wrap.__cache__ = cache wrap.__name__ = fun.__name__ return wrap def get_registry_app_path(key, filename): """ Returns the value of a registry key for an executable :type key: string :type filename: list of string """ if not winreg: return None try: result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0]) except OSError: pass else: if os.path.isfile(result): return result def lib64(): """ Guess the default ``/usr/lib`` extension for 64-bit applications :return: '64' or '' :rtype: string """ # default settings for /usr/lib if os.sep == '/': if platform.architecture()[0] == '64bit': if os.path.exists('/usr/lib64') and not os.path.exists('/usr/lib32'): return '64' return '' def sane_path(p): # private function for the time being! return os.path.abspath(os.path.expanduser(p)) process_pool = [] """ List of processes started to execute sub-process commands """ def get_process(): """ Returns a process object that can execute commands as sub-processes :rtype: subprocess.Popen """ try: return process_pool.pop() except IndexError: filepath = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'processor.py' cmd = [sys.executable, '-c', readf(filepath)] return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0, close_fds=not is_win32) def run_prefork_process(cmd, kwargs, cargs): """ Delegates process execution to a pre-forked process instance. 
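The request is written to the helper process as a single line, roughly::

	base64(pickle([cmd, kwargs, cargs])) + '\n'

and the reply is one base64 line decoding to ``[ret, out, err, exception_name, traceback]``; unpicklable inputs fall back to :py:func:`waflib.Utils.run_regular_process`.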
""" if not kwargs.get('env'): kwargs['env'] = dict(os.environ) try: obj = base64.b64encode(cPickle.dumps([cmd, kwargs, cargs])) except (TypeError, AttributeError): return run_regular_process(cmd, kwargs, cargs) proc = get_process() if not proc: return run_regular_process(cmd, kwargs, cargs) proc.stdin.write(obj) proc.stdin.write('\n'.encode()) proc.stdin.flush() obj = proc.stdout.readline() if not obj: raise OSError('Preforked sub-process %r died' % proc.pid) process_pool.append(proc) lst = cPickle.loads(base64.b64decode(obj)) # Jython wrapper failures (bash/execvp) assert len(lst) == 5 ret, out, err, ex, trace = lst if ex: if ex == 'OSError': raise OSError(trace) elif ex == 'ValueError': raise ValueError(trace) elif ex == 'TimeoutExpired': exc = TimeoutExpired(cmd, timeout=cargs['timeout'], output=out) exc.stderr = err raise exc else: raise Exception(trace) return ret, out, err def lchown(path, user=-1, group=-1): """ Change the owner/group of a path, raises an OSError if the ownership change fails. :param user: user to change :type user: int or str :param group: group to change :type group: int or str """ if isinstance(user, str): import pwd entry = pwd.getpwnam(user) if not entry: raise OSError('Unknown user %r' % user) user = entry[2] if isinstance(group, str): import grp entry = grp.getgrnam(group) if not entry: raise OSError('Unknown group %r' % group) group = entry[2] return os.lchown(path, user, group) def run_regular_process(cmd, kwargs, cargs={}): """ Executes a subprocess command by using subprocess.Popen """ proc = subprocess.Popen(cmd, **kwargs) if kwargs.get('stdout') or kwargs.get('stderr'): try: out, err = proc.communicate(**cargs) except TimeoutExpired: if kwargs.get('start_new_session') and hasattr(os, 'killpg'): os.killpg(proc.pid, signal.SIGKILL) else: proc.kill() out, err = proc.communicate() exc = TimeoutExpired(proc.args, timeout=cargs['timeout'], output=out) exc.stderr = err raise exc status = proc.returncode else: out, err = (None, None) try: status = proc.wait(**cargs) except TimeoutExpired as e: if kwargs.get('start_new_session') and hasattr(os, 'killpg'): os.killpg(proc.pid, signal.SIGKILL) else: proc.kill() proc.wait() raise e return status, out, err def run_process(cmd, kwargs, cargs={}): """ Executes a subprocess by using a pre-forked process when possible or falling back to subprocess.Popen. See :py:func:`waflib.Utils.run_prefork_process` and :py:func:`waflib.Utils.run_regular_process` """ if kwargs.get('stdout') and kwargs.get('stderr'): return run_prefork_process(cmd, kwargs, cargs) else: return run_regular_process(cmd, kwargs, cargs) def alloc_process_pool(n, force=False): """ Allocates an amount of processes to the default pool so its size is at least *n*. It is useful to call this function early so that the pre-forked processes use as little memory as possible. 
:param n: pool size :type n: integer :param force: if True then *n* more processes are added to the existing pool :type force: bool """ # mandatory on python2, unnecessary on python >= 3.2 global run_process, get_process, alloc_process_pool if not force: n = max(n - len(process_pool), 0) try: lst = [get_process() for x in range(n)] except OSError: run_process = run_regular_process get_process = alloc_process_pool = nada else: for x in lst: process_pool.append(x) def atexit_pool(): for k in process_pool: try: os.kill(k.pid, 9) except OSError: pass else: k.wait() # see #1889 if (sys.hexversion<0x207000f and not is_win32) or sys.hexversion>=0x306000f: atexit.register(atexit_pool) if os.environ.get('WAF_NO_PREFORK') or sys.platform == 'cli' or not sys.executable: run_process = run_regular_process get_process = alloc_process_pool = nada ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/__init__.py0000660000000000000000000000010700000000000021514 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/ansiterm.py0000660000000000000000000002527300000000000021612 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 """ Emulate a vt100 terminal in cmd.exe By wrapping sys.stdout / sys.stderr with Ansiterm, the vt100 escape characters will be interpreted and the equivalent actions will be performed with Win32 console commands. """ import os, re, sys from waflib import Utils wlock = Utils.threading.Lock() try: from ctypes import Structure, windll, c_short, c_ushort, c_ulong, c_int, byref, c_wchar, POINTER, c_long except ImportError: class AnsiTerm(object): def __init__(self, stream): self.stream = stream try: self.errors = self.stream.errors except AttributeError: pass # python 2.5 self.encoding = self.stream.encoding def write(self, txt): try: wlock.acquire() self.stream.write(txt) self.stream.flush() finally: wlock.release() def fileno(self): return self.stream.fileno() def flush(self): self.stream.flush() def isatty(self): return self.stream.isatty() else: class COORD(Structure): _fields_ = [("X", c_short), ("Y", c_short)] class SMALL_RECT(Structure): _fields_ = [("Left", c_short), ("Top", c_short), ("Right", c_short), ("Bottom", c_short)] class CONSOLE_SCREEN_BUFFER_INFO(Structure): _fields_ = [("Size", COORD), ("CursorPosition", COORD), ("Attributes", c_ushort), ("Window", SMALL_RECT), ("MaximumWindowSize", COORD)] class CONSOLE_CURSOR_INFO(Structure): _fields_ = [('dwSize', c_ulong), ('bVisible', c_int)] try: _type = unicode except NameError: _type = str to_int = lambda number, default: number and int(number) or default STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 windll.kernel32.GetStdHandle.argtypes = [c_ulong] windll.kernel32.GetStdHandle.restype = c_ulong windll.kernel32.GetConsoleScreenBufferInfo.argtypes = [c_ulong, POINTER(CONSOLE_SCREEN_BUFFER_INFO)] windll.kernel32.GetConsoleScreenBufferInfo.restype = c_long windll.kernel32.SetConsoleTextAttribute.argtypes = [c_ulong, c_ushort] windll.kernel32.SetConsoleTextAttribute.restype = c_long windll.kernel32.FillConsoleOutputCharacterW.argtypes = [c_ulong, c_wchar, c_ulong, POINTER(COORD), POINTER(c_ulong)] windll.kernel32.FillConsoleOutputCharacterW.restype = c_long windll.kernel32.FillConsoleOutputAttribute.argtypes = [c_ulong, 
c_ushort, c_ulong, POINTER(COORD), POINTER(c_ulong) ] windll.kernel32.FillConsoleOutputAttribute.restype = c_long windll.kernel32.SetConsoleCursorPosition.argtypes = [c_ulong, POINTER(COORD) ] windll.kernel32.SetConsoleCursorPosition.restype = c_long windll.kernel32.SetConsoleCursorInfo.argtypes = [c_ulong, POINTER(CONSOLE_CURSOR_INFO)] windll.kernel32.SetConsoleCursorInfo.restype = c_long class AnsiTerm(object): """ emulate a vt100 terminal in cmd.exe """ def __init__(self, s): self.stream = s try: self.errors = s.errors except AttributeError: pass # python2.5 self.encoding = s.encoding self.cursor_history = [] handle = (s.fileno() == 2) and STD_ERROR_HANDLE or STD_OUTPUT_HANDLE self.hconsole = windll.kernel32.GetStdHandle(handle) self._sbinfo = CONSOLE_SCREEN_BUFFER_INFO() self._csinfo = CONSOLE_CURSOR_INFO() windll.kernel32.GetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) # just to double check that the console is usable self._orig_sbinfo = CONSOLE_SCREEN_BUFFER_INFO() r = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self._orig_sbinfo)) self._isatty = r == 1 def screen_buffer_info(self): """ Updates self._sbinfo and returns it """ windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self._sbinfo)) return self._sbinfo def clear_line(self, param): mode = param and int(param) or 0 sbinfo = self.screen_buffer_info() if mode == 1: # Clear from beginning of line to cursor position line_start = COORD(0, sbinfo.CursorPosition.Y) line_length = sbinfo.Size.X elif mode == 2: # Clear entire line line_start = COORD(sbinfo.CursorPosition.X, sbinfo.CursorPosition.Y) line_length = sbinfo.Size.X - sbinfo.CursorPosition.X else: # Clear from cursor position to end of line line_start = sbinfo.CursorPosition line_length = sbinfo.Size.X - sbinfo.CursorPosition.X chars_written = c_ulong() windll.kernel32.FillConsoleOutputCharacterW(self.hconsole, c_wchar(' '), line_length, line_start, byref(chars_written)) windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, line_length, line_start, byref(chars_written)) def clear_screen(self, param): mode = to_int(param, 0) sbinfo = self.screen_buffer_info() if mode == 1: # Clear from beginning of screen to cursor position clear_start = COORD(0, 0) clear_length = sbinfo.CursorPosition.X * sbinfo.CursorPosition.Y elif mode == 2: # Clear entire screen and return cursor to home clear_start = COORD(0, 0) clear_length = sbinfo.Size.X * sbinfo.Size.Y windll.kernel32.SetConsoleCursorPosition(self.hconsole, clear_start) else: # Clear from cursor position to end of screen clear_start = sbinfo.CursorPosition clear_length = ((sbinfo.Size.X - sbinfo.CursorPosition.X) + sbinfo.Size.X * (sbinfo.Size.Y - sbinfo.CursorPosition.Y)) chars_written = c_ulong() windll.kernel32.FillConsoleOutputCharacterW(self.hconsole, c_wchar(' '), clear_length, clear_start, byref(chars_written)) windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, clear_length, clear_start, byref(chars_written)) def push_cursor(self, param): sbinfo = self.screen_buffer_info() self.cursor_history.append(sbinfo.CursorPosition) def pop_cursor(self, param): if self.cursor_history: old_pos = self.cursor_history.pop() windll.kernel32.SetConsoleCursorPosition(self.hconsole, old_pos) def set_cursor(self, param): y, sep, x = param.partition(';') x = to_int(x, 1) - 1 y = to_int(y, 1) - 1 sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, x), sbinfo.Size.X), min(max(0, y), sbinfo.Size.Y) ) 
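            # Added note (not in upstream ansiterm.py): ANSI/CSI cursor coordinates are
            # 1-based, hence the "- 1" conversions above; the result is clamped to the
            # screen buffer size before it is handed to the 0-based Win32 cursor call below.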
windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def set_column(self, param): x = to_int(param, 1) - 1 sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, x), sbinfo.Size.X), sbinfo.CursorPosition.Y ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def move_cursor(self, x_offset=0, y_offset=0): sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, sbinfo.CursorPosition.X + x_offset), sbinfo.Size.X), min(max(0, sbinfo.CursorPosition.Y + y_offset), sbinfo.Size.Y) ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def move_up(self, param): self.move_cursor(y_offset = -to_int(param, 1)) def move_down(self, param): self.move_cursor(y_offset = to_int(param, 1)) def move_left(self, param): self.move_cursor(x_offset = -to_int(param, 1)) def move_right(self, param): self.move_cursor(x_offset = to_int(param, 1)) def next_line(self, param): sbinfo = self.screen_buffer_info() self.move_cursor( x_offset = -sbinfo.CursorPosition.X, y_offset = to_int(param, 1) ) def prev_line(self, param): sbinfo = self.screen_buffer_info() self.move_cursor( x_offset = -sbinfo.CursorPosition.X, y_offset = -to_int(param, 1) ) def rgb2bgr(self, c): return ((c&1) << 2) | (c&2) | ((c&4)>>2) def set_color(self, param): cols = param.split(';') sbinfo = self.screen_buffer_info() attr = sbinfo.Attributes for c in cols: c = to_int(c, 0) if 29 < c < 38: # fgcolor attr = (attr & 0xfff0) | self.rgb2bgr(c - 30) elif 39 < c < 48: # bgcolor attr = (attr & 0xff0f) | (self.rgb2bgr(c - 40) << 4) elif c == 0: # reset attr = self._orig_sbinfo.Attributes elif c == 1: # strong attr |= 0x08 elif c == 4: # blink not available -> bg intensity attr |= 0x80 elif c == 7: # negative attr = (attr & 0xff88) | ((attr & 0x70) >> 4) | ((attr & 0x07) << 4) windll.kernel32.SetConsoleTextAttribute(self.hconsole, attr) def show_cursor(self,param): self._csinfo.bVisible = 1 windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) def hide_cursor(self,param): self._csinfo.bVisible = 0 windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) ansi_command_table = { 'A': move_up, 'B': move_down, 'C': move_right, 'D': move_left, 'E': next_line, 'F': prev_line, 'G': set_column, 'H': set_cursor, 'f': set_cursor, 'J': clear_screen, 'K': clear_line, 'h': show_cursor, 'l': hide_cursor, 'm': set_color, 's': push_cursor, 'u': pop_cursor, } # Match either the escape sequence or text not containing escape sequence ansi_tokens = re.compile(r'(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))') def write(self, text): try: wlock.acquire() if self._isatty: for param, cmd, txt in self.ansi_tokens.findall(text): if cmd: cmd_func = self.ansi_command_table.get(cmd) if cmd_func: cmd_func(self, param) else: self.writeconsole(txt) else: # no support for colors in the console, just output the text: # eclipse or msys may be able to interpret the escape sequences self.stream.write(text) finally: wlock.release() def writeconsole(self, txt): chars_written = c_ulong() writeconsole = windll.kernel32.WriteConsoleA if isinstance(txt, _type): writeconsole = windll.kernel32.WriteConsoleW # MSDN says that there is a shared buffer of 64 KB for the console # writes. 
Attempt to not get ERROR_NOT_ENOUGH_MEMORY, see waf issue #746 done = 0 todo = len(txt) chunk = 32<<10 while todo != 0: doing = min(chunk, todo) buf = txt[done:done+doing] r = writeconsole(self.hconsole, buf, doing, byref(chars_written), None) if r == 0: chunk >>= 1 continue done += doing todo -= doing def fileno(self): return self.stream.fileno() def flush(self): pass def isatty(self): return self._isatty if sys.stdout.isatty() or sys.stderr.isatty(): handle = sys.stdout.isatty() and STD_OUTPUT_HANDLE or STD_ERROR_HANDLE console = windll.kernel32.GetStdHandle(handle) sbinfo = CONSOLE_SCREEN_BUFFER_INFO() def get_term_cols(): windll.kernel32.GetConsoleScreenBufferInfo(console, byref(sbinfo)) # Issue 1401 - the progress bar cannot reach the last character return sbinfo.Size.X - 1 # just try and see try: import struct, fcntl, termios except ImportError: pass else: if (sys.stdout.isatty() or sys.stderr.isatty()) and os.environ.get('TERM', '') not in ('dumb', 'emacs'): FD = sys.stdout.isatty() and sys.stdout.fileno() or sys.stderr.fileno() def fun(): return struct.unpack("HHHH", fcntl.ioctl(FD, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)))[1] try: fun() except Exception as e: pass else: get_term_cols = fun ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/__init__.py0000660000000000000000000000010700000000000023022 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2010 (ita) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/batched_cc.py0000660000000000000000000001112600000000000023325 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2015 (ita) """ Instead of compiling object files one by one, c/c++ compilers are often able to compile at once: cc -c ../file1.c ../file2.c ../file3.c Files are output on the directory where the compiler is called, and dependencies are more difficult to track (do not run the command on all source files if only one file changes) As such, we do as if the files were compiled one by one, but no command is actually run: replace each cc/cpp Task by a TaskSlave. A new task called TaskMaster collects the signatures from each slave and finds out the command-line to run. Just import this module to start using it: def build(bld): bld.load('batched_cc') Note that this is provided as an example, unity builds are recommended for best performance results (fewer tasks and fewer jobs to execute). See waflib/extras/unity.py. 
""" from waflib import Task, Utils from waflib.TaskGen import extension, feature, after_method from waflib.Tools import c, cxx MAX_BATCH = 50 c_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}' c_fun, _ = Task.compile_fun_noshell(c_str) cxx_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}' cxx_fun, _ = Task.compile_fun_noshell(cxx_str) count = 70000 class batch(Task.Task): color = 'PINK' after = ['c', 'cxx'] before = ['cprogram', 'cshlib', 'cstlib', 'cxxprogram', 'cxxshlib', 'cxxstlib'] def uid(self): return Utils.h_list([Task.Task.uid(self), self.generator.idx, self.generator.path.abspath(), self.generator.target]) def __str__(self): return 'Batch compilation for %d slaves' % len(self.slaves) def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.slaves = [] self.inputs = [] self.hasrun = 0 global count count += 1 self.idx = count def add_slave(self, slave): self.slaves.append(slave) self.set_run_after(slave) def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER for t in self.slaves: #if t.executed: if t.hasrun != Task.SKIPPED: return Task.RUN_ME return Task.SKIP_ME def get_cwd(self): return self.slaves[0].outputs[0].parent def batch_incpaths(self): st = self.env.CPPPATH_ST return [st % node.abspath() for node in self.generator.includes_nodes] def run(self): self.outputs = [] srclst = [] slaves = [] for t in self.slaves: if t.hasrun != Task.SKIPPED: slaves.append(t) srclst.append(t.inputs[0].abspath()) self.env.SRCLST = srclst if self.slaves[0].__class__.__name__ == 'c': ret = c_fun(self) else: ret = cxx_fun(self) if ret: return ret for t in slaves: t.old_post_run() def hook(cls_type): def n_hook(self, node): ext = '.obj' if self.env.CC_NAME == 'msvc' else '.o' name = node.name k = name.rfind('.') if k >= 0: basename = name[:k] + ext else: basename = name + ext outdir = node.parent.get_bld().make_node('%d' % self.idx) outdir.mkdir() out = outdir.find_or_declare(basename) task = self.create_task(cls_type, node, out) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] if not getattr(self, 'masters', None): self.masters = {} self.allmasters = [] def fix_path(tsk): if self.env.CC_NAME == 'msvc': tsk.env.append_unique('CXX_TGT_F_BATCHED', '/Fo%s\\' % outdir.abspath()) if not node.parent in self.masters: m = self.masters[node.parent] = self.master = self.create_task('batch') fix_path(m) self.allmasters.append(m) else: m = self.masters[node.parent] if len(m.slaves) > MAX_BATCH: m = self.masters[node.parent] = self.master = self.create_task('batch') fix_path(m) self.allmasters.append(m) m.add_slave(task) return task return n_hook extension('.c')(hook('c')) extension('.cpp','.cc','.cxx','.C','.c++')(hook('cxx')) @feature('cprogram', 'cshlib', 'cstaticlib', 'cxxprogram', 'cxxshlib', 'cxxstlib') @after_method('apply_link') def link_after_masters(self): if getattr(self, 'allmasters', None): for m in self.allmasters: self.link_task.set_run_after(m) # Modify the c and cxx task classes - in theory it would be best to # create subclasses and to re-map the c/c++ extensions for x in ('c', 'cxx'): t = Task.classes[x] def run(self): pass def post_run(self): pass setattr(t, 'oldrun', getattr(t, 'run', None)) setattr(t, 'run', run) setattr(t, 'old_post_run', t.post_run) setattr(t, 
'post_run', post_run) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/biber.py0000660000000000000000000000313500000000000022352 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Latex processing using "biber" """ import os from waflib import Task, Logs from waflib.Tools import tex as texmodule class tex(texmodule.tex): biber_fun, _ = Task.compile_fun('${BIBER} ${BIBERFLAGS} ${SRCFILE}',shell=False) biber_fun.__doc__ = """ Execute the program **biber** """ def bibfile(self): return None def bibunits(self): self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = self.aux_nodes[0].name[:-4] if not self.env['PROMPT_LATEX']: self.env.append_unique('BIBERFLAGS', '--quiet') path = self.aux_nodes[0].abspath()[:-4] + '.bcf' if os.path.isfile(path): Logs.warn('calling biber') self.check_status('error when calling biber, check %s.blg for errors' % (self.env.SRCFILE), self.biber_fun()) else: super(tex, self).bibfile() super(tex, self).bibunits() class latex(tex): texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) class pdflatex(tex): texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) class xelatex(tex): texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) def configure(self): """ Almost the same as in tex.py, but try to detect 'biber' """ v = self.env for p in ' biber tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split(): try: self.find_program(p, var=p.upper()) except self.errors.ConfigurationError: pass v['DVIPSFLAGS'] = '-Ppdf' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/bjam.py0000660000000000000000000000746500000000000022212 0ustar00rootroot00000000000000#! /usr/bin/env python # per rosengren 2011 from os import sep, readlink from waflib import Logs from waflib.TaskGen import feature, after_method from waflib.Task import Task, always_run def options(opt): grp = opt.add_option_group('Bjam Options') grp.add_option('--bjam_src', default=None, help='You can find it in /tools/jam/src') grp.add_option('--bjam_uname', default='linuxx86_64', help='bjam is built in /bin./bjam') grp.add_option('--bjam_config', default=None) grp.add_option('--bjam_toolset', default=None) def configure(cnf): if not cnf.env.BJAM_SRC: cnf.env.BJAM_SRC = cnf.options.bjam_src if not cnf.env.BJAM_UNAME: cnf.env.BJAM_UNAME = cnf.options.bjam_uname try: cnf.find_program('bjam', path_list=[ cnf.env.BJAM_SRC + sep + 'bin.' 
+ cnf.env.BJAM_UNAME ]) except Exception: cnf.env.BJAM = None if not cnf.env.BJAM_CONFIG: cnf.env.BJAM_CONFIG = cnf.options.bjam_config if not cnf.env.BJAM_TOOLSET: cnf.env.BJAM_TOOLSET = cnf.options.bjam_toolset @feature('bjam') @after_method('process_rule') def process_bjam(self): if not self.bld.env.BJAM: self.create_task('bjam_creator') self.create_task('bjam_build') self.create_task('bjam_installer') if getattr(self, 'always', False): always_run(bjam_creator) always_run(bjam_build) always_run(bjam_installer) class bjam_creator(Task): ext_out = 'bjam_exe' vars=['BJAM_SRC', 'BJAM_UNAME'] def run(self): env = self.env gen = self.generator bjam = gen.bld.root.find_dir(env.BJAM_SRC) if not bjam: Logs.error('Can not find bjam source') return -1 bjam_exe_relpath = 'bin.' + env.BJAM_UNAME + '/bjam' bjam_exe = bjam.find_resource(bjam_exe_relpath) if bjam_exe: env.BJAM = bjam_exe.srcpath() return 0 bjam_cmd = ['./build.sh'] Logs.debug('runner: ' + bjam.srcpath() + '> ' + str(bjam_cmd)) result = self.exec_command(bjam_cmd, cwd=bjam.srcpath()) if not result == 0: Logs.error('bjam failed') return -1 bjam_exe = bjam.find_resource(bjam_exe_relpath) if bjam_exe: env.BJAM = bjam_exe.srcpath() return 0 Logs.error('bjam failed') return -1 class bjam_build(Task): ext_in = 'bjam_exe' ext_out = 'install' vars = ['BJAM_TOOLSET'] def run(self): env = self.env gen = self.generator path = gen.path bld = gen.bld if hasattr(gen, 'root'): build_root = path.find_node(gen.root) else: build_root = path jam = bld.srcnode.find_resource(env.BJAM_CONFIG) if jam: Logs.debug('bjam: Using jam configuration from ' + jam.srcpath()) jam_rel = jam.relpath_gen(build_root) else: Logs.warn('No build configuration in build_config/user-config.jam. Using default') jam_rel = None bjam_exe = bld.srcnode.find_node(env.BJAM) if not bjam_exe: Logs.error('env.BJAM is not set') return -1 bjam_exe_rel = bjam_exe.relpath_gen(build_root) cmd = ([bjam_exe_rel] + (['--user-config=' + jam_rel] if jam_rel else []) + ['--stagedir=' + path.get_bld().path_from(build_root)] + ['--debug-configuration'] + ['--with-' + lib for lib in self.generator.target] + (['toolset=' + env.BJAM_TOOLSET] if env.BJAM_TOOLSET else []) + ['link=' + 'shared'] + ['variant=' + 'release'] ) Logs.debug('runner: ' + build_root.srcpath() + '> ' + str(cmd)) ret = self.exec_command(cmd, cwd=build_root.srcpath()) if ret != 0: return ret self.set_outputs(path.get_bld().ant_glob('lib/*') + path.get_bld().ant_glob('bin/*')) return 0 class bjam_installer(Task): ext_in = 'install' def run(self): gen = self.generator path = gen.path for idir, pat in (('${LIBDIR}', 'lib/*'), ('${BINDIR}', 'bin/*')): files = [] for n in path.get_bld().ant_glob(pat): try: t = readlink(n.srcpath()) gen.bld.symlink_as(sep.join([idir, n.name]), t, postpone=False) except OSError: files.append(n) gen.bld.install_files(idir, files, postpone=False) return 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/blender.py0000660000000000000000000000577500000000000022716 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Michal Proszek, 2014 (poxip) """ Detect the version of Blender, path and install the extension: def options(opt): opt.load('blender') def configure(cnf): cnf.load('blender') def build(bld): bld(name='io_mesh_raw', feature='blender', files=['file1.py', 'file2.py'] ) If name variable is empty, files are installed in scripts/addons, otherwise scripts/addons/name Use ./waf 
configure --system to set the installation directory to system path """ import os import re from getpass import getuser from waflib import Utils from waflib.TaskGen import feature from waflib.Configure import conf def options(opt): opt.add_option( '-s', '--system', dest='directory_system', default=False, action='store_true', help='determines installation directory (default: user)' ) @conf def find_blender(ctx): '''Return version number of blender, if not exist return None''' blender = ctx.find_program('blender') output = ctx.cmd_and_log(blender + ['--version']) m = re.search(r'Blender\s*((\d+(\.|))*)', output) if not m: ctx.fatal('Could not retrieve blender version') try: blender_version = m.group(1) except IndexError: ctx.fatal('Could not retrieve blender version') ctx.env['BLENDER_VERSION'] = blender_version return blender @conf def configure_paths(ctx): """Setup blender paths""" # Get the username user = getuser() _platform = Utils.unversioned_sys_platform() config_path = {'user': '', 'system': ''} if _platform.startswith('linux'): config_path['user'] = '/home/%s/.config/blender/' % user config_path['system'] = '/usr/share/blender/' elif _platform == 'darwin': # MAC OS X config_path['user'] = \ '/Users/%s/Library/Application Support/Blender/' % user config_path['system'] = '/Library/Application Support/Blender/' elif Utils.is_win32: # Windows appdata_path = ctx.getenv('APPDATA').replace('\\', '/') homedrive = ctx.getenv('HOMEDRIVE').replace('\\', '/') config_path['user'] = '%s/Blender Foundation/Blender/' % appdata_path config_path['system'] = \ '%sAll Users/AppData/Roaming/Blender Foundation/Blender/' % homedrive else: ctx.fatal( 'Unsupported platform. ' 'Available platforms: Linux, OSX, MS-Windows.' ) blender_version = ctx.env['BLENDER_VERSION'] config_path['user'] += blender_version + '/' config_path['system'] += blender_version + '/' ctx.env['BLENDER_CONFIG_DIR'] = os.path.abspath(config_path['user']) if ctx.options.directory_system: ctx.env['BLENDER_CONFIG_DIR'] = config_path['system'] ctx.env['BLENDER_ADDONS_DIR'] = os.path.join( ctx.env['BLENDER_CONFIG_DIR'], 'scripts/addons' ) Utils.check_dir(ctx.env['BLENDER_ADDONS_DIR']) def configure(ctx): ctx.find_blender() ctx.configure_paths() @feature('blender_list') def blender(self): # Two ways to install a blender extension: as a module or just .py files dest_dir = os.path.join(self.env.BLENDER_ADDONS_DIR, self.get_name()) Utils.check_dir(dest_dir) self.add_install_files(install_to=dest_dir, install_from=getattr(self, 'files', '.')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/boo.py0000660000000000000000000000435000000000000022046 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Yannick LM 2011 """ Support for the boo programming language, for example:: bld(features = "boo", # necessary feature source = "src.boo", # list of boo files gen = "world.dll", # target type = "library", # library/exe ("-target:xyz" flag) name = "world" # necessary if the target is referenced by 'use' ) """ from waflib import Task from waflib.Configure import conf from waflib.TaskGen import feature, after_method, before_method, extension @extension('.boo') def boo_hook(self, node): # Nothing here yet ... 
# TODO filter the non-boo source files in 'apply_booc' and remove this method pass @feature('boo') @before_method('process_source') def apply_booc(self): """Create a booc task """ src_nodes = self.to_nodes(self.source) out_node = self.path.find_or_declare(self.gen) self.boo_task = self.create_task('booc', src_nodes, [out_node]) # Set variables used by the 'booc' task self.boo_task.env.OUT = '-o:%s' % out_node.abspath() # type is "exe" by default type = getattr(self, "type", "exe") self.boo_task.env.BOO_TARGET_TYPE = "-target:%s" % type @feature('boo') @after_method('apply_boo') def use_boo(self): """" boo applications honor the **use** keyword:: """ dep_names = self.to_list(getattr(self, 'use', [])) for dep_name in dep_names: dep_task_gen = self.bld.get_tgen_by_name(dep_name) if not dep_task_gen: continue dep_task_gen.post() dep_task = getattr(dep_task_gen, 'boo_task', None) if not dep_task: # Try a cs task: dep_task = getattr(dep_task_gen, 'cs_task', None) if not dep_task: # Try a link task: dep_task = getattr(dep_task, 'link_task', None) if not dep_task: # Abort ... continue self.boo_task.set_run_after(dep_task) # order self.boo_task.dep_nodes.extend(dep_task.outputs) # dependency self.boo_task.env.append_value('BOO_FLAGS', '-reference:%s' % dep_task.outputs[0].abspath()) class booc(Task.Task): """Compiles .boo files """ color = 'YELLOW' run_str = '${BOOC} ${BOO_FLAGS} ${BOO_TARGET_TYPE} ${OUT} ${SRC}' @conf def check_booc(self): self.find_program('booc', 'BOOC') self.env.BOO_FLAGS = ['-nologo'] def configure(self): """Check that booc is available """ self.check_booc() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/boost.py0000660000000000000000000004417500000000000022426 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # # partially based on boost.py written by Gernot Vormayr # written by Ruediger Sonderfeld , 2008 # modified by Bjoern Michaelsen, 2008 # modified by Luca Fossati, 2008 # rewritten for waf 1.5.1, Thomas Nagy, 2008 # rewritten for waf 1.6.2, Sylvain Rouquette, 2011 ''' This is an extra tool, not bundled with the default waf binary. To add the boost tool to the waf file: $ ./waf-light --tools=compat15,boost or, if you have waf >= 1.6.2 $ ./waf update --files=boost When using this tool, the wscript will look like: def options(opt): opt.load('compiler_cxx boost') def configure(conf): conf.load('compiler_cxx boost') conf.check_boost(lib='system filesystem') def build(bld): bld(source='main.cpp', target='app', use='BOOST') Options are generated, in order to specify the location of boost includes/libraries. The `check_boost` configuration function allows to specify the used boost libraries. It can also provide default arguments to the --boost-mt command-line arguments. Everything will be packaged together in a BOOST component that you can use. When using MSVC, a lot of compilation flags need to match your BOOST build configuration: - you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined. Errors: C4530 - boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC So before calling `conf.check_boost` you might want to disabling by adding conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB'] Errors: - boost might also be compiled with /MT, which links the runtime statically. 
If you have problems with redefined symbols, self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB'] self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc'] Passing `--boost-linkage_autodetect` might help ensuring having a correct linkage in some basic cases. ''' import sys import re from waflib import Utils, Logs, Errors from waflib.Configure import conf from waflib.TaskGen import feature, after_method BOOST_LIBS = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib'] BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include'] BOOST_VERSION_FILE = 'boost/version.hpp' BOOST_VERSION_CODE = ''' #include #include int main() { std::cout << BOOST_LIB_VERSION << ":" << BOOST_VERSION << std::endl; } ''' BOOST_ERROR_CODE = ''' #include int main() { boost::system::error_code c; } ''' PTHREAD_CODE = ''' #include static void* f(void*) { return 0; } int main() { pthread_t th; pthread_attr_t attr; pthread_attr_init(&attr); pthread_create(&th, &attr, &f, 0); pthread_join(th, 0); pthread_cleanup_push(0, 0); pthread_cleanup_pop(0); pthread_attr_destroy(&attr); } ''' BOOST_THREAD_CODE = ''' #include int main() { boost::thread t; } ''' BOOST_LOG_CODE = ''' #include #include #include int main() { using namespace boost::log; add_common_attributes(); add_console_log(std::clog, keywords::format = "%Message%"); BOOST_LOG_TRIVIAL(debug) << "log is working" << std::endl; } ''' # toolsets from {boost_dir}/tools/build/v2/tools/common.jam PLATFORM = Utils.unversioned_sys_platform() detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il' detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang' detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc' BOOST_TOOLSETS = { 'borland': 'bcb', 'clang': detect_clang, 'como': 'como', 'cw': 'cw', 'darwin': 'xgcc', 'edg': 'edg', 'g++': detect_mingw, 'gcc': detect_mingw, 'icpc': detect_intel, 'intel': detect_intel, 'kcc': 'kcc', 'kylix': 'bck', 'mipspro': 'mp', 'mingw': 'mgw', 'msvc': 'vc', 'qcc': 'qcc', 'sun': 'sw', 'sunc++': 'sw', 'tru64cxx': 'tru', 'vacpp': 'xlc' } def options(opt): opt = opt.add_option_group('Boost Options') opt.add_option('--boost-includes', type='string', default='', dest='boost_includes', help='''path to the directory where the boost includes are, e.g., /path/to/boost_1_55_0/stage/include''') opt.add_option('--boost-libs', type='string', default='', dest='boost_libs', help='''path to the directory where the boost libs are, e.g., path/to/boost_1_55_0/stage/lib''') opt.add_option('--boost-mt', action='store_true', default=False, dest='boost_mt', help='select multi-threaded libraries') opt.add_option('--boost-abi', type='string', default='', dest='boost_abi', help='''select libraries with tags (gd for debug, static is automatically added), see doc Boost, Getting Started, chapter 6.1''') opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect', help="auto-detect boost linkage options (don't get used to it / might break other stuff)") opt.add_option('--boost-toolset', type='string', default='', dest='boost_toolset', help='force a toolset e.g. 
msvc, vc90, \ gcc, mingw, mgw45 (default: auto)') py_version = '%d%d' % (sys.version_info[0], sys.version_info[1]) opt.add_option('--boost-python', type='string', default=py_version, dest='boost_python', help='select the lib python with this version \ (default: %s)' % py_version) @conf def __boost_get_version_file(self, d): if not d: return None dnode = self.root.find_dir(d) if dnode: return dnode.find_node(BOOST_VERSION_FILE) return None @conf def boost_get_version(self, d): """silently retrieve the boost version number""" node = self.__boost_get_version_file(d) if node: try: txt = node.read() except EnvironmentError: Logs.error("Could not read the file %r", node.abspath()) else: re_but1 = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.+)"', re.M) m1 = re_but1.search(txt) re_but2 = re.compile('^#define\\s+BOOST_VERSION\\s+(\\d+)', re.M) m2 = re_but2.search(txt) if m1 and m2: return (m1.group(1), m2.group(1)) return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True).split(":") @conf def boost_get_includes(self, *k, **kw): includes = k and k[0] or kw.get('includes') if includes and self.__boost_get_version_file(includes): return includes for d in self.environ.get('INCLUDE', '').split(';') + BOOST_INCLUDES: if self.__boost_get_version_file(d): return d if includes: self.end_msg('headers not found in %s' % includes) self.fatal('The configuration failed') else: self.end_msg('headers not found, please provide a --boost-includes argument (see help)') self.fatal('The configuration failed') @conf def boost_get_toolset(self, cc): toolset = cc if not cc: build_platform = Utils.unversioned_sys_platform() if build_platform in BOOST_TOOLSETS: cc = build_platform else: cc = self.env.CXX_NAME if cc in BOOST_TOOLSETS: toolset = BOOST_TOOLSETS[cc] return isinstance(toolset, str) and toolset or toolset(self.env) @conf def __boost_get_libs_path(self, *k, **kw): ''' return the lib path and all the files in it ''' if 'files' in kw: return self.root.find_dir('.'), Utils.to_list(kw['files']) libs = k and k[0] or kw.get('libs') if libs: path = self.root.find_dir(libs) files = path.ant_glob('*boost_*') if not libs or not files: for d in self.environ.get('LIB', '').split(';') + BOOST_LIBS: if not d: continue path = self.root.find_dir(d) if path: files = path.ant_glob('*boost_*') if files: break path = self.root.find_dir(d + '64') if path: files = path.ant_glob('*boost_*') if files: break if not path: if libs: self.end_msg('libs not found in %s' % libs) self.fatal('The configuration failed') else: self.end_msg('libs not found, please provide a --boost-libs argument (see help)') self.fatal('The configuration failed') self.to_log('Found the boost path in %r with the libraries:' % path) for x in files: self.to_log(' %r' % x) return path, files @conf def boost_get_libs(self, *k, **kw): ''' return the lib path and the required libs according to the parameters ''' path, files = self.__boost_get_libs_path(**kw) files = sorted(files, key=lambda f: (len(f.name), f.name), reverse=True) toolset = self.boost_get_toolset(kw.get('toolset', '')) toolset_pat = '(-%s[0-9]{0,3})' % toolset version = '-%s' % self.env.BOOST_VERSION def find_lib(re_lib, files): for file in files: if re_lib.search(file.name): self.to_log('Found boost lib %s' % file) return file return None # extensions from Tools.ccroot.lib_patterns wo_ext = re.compile(r"\.(a|so|lib|dll|dylib)(\.[0-9\.]+)?$") def format_lib_name(name): if name.startswith('lib') and self.env.CC_NAME != 'msvc': name = name[3:] return wo_ext.sub("", 
name) def match_libs(lib_names, is_static): libs = [] lib_names = Utils.to_list(lib_names) if not lib_names: return libs t = [] if kw.get('mt', False): t.append('-mt') if kw.get('abi'): t.append('%s%s' % (is_static and '-s' or '-', kw['abi'])) elif is_static: t.append('-s') tags_pat = t and ''.join(t) or '' ext = is_static and self.env.cxxstlib_PATTERN or self.env.cxxshlib_PATTERN ext = ext.partition('%s')[2] # remove '%s' or 'lib%s' from PATTERN for lib in lib_names: if lib == 'python': # for instance, with python='27', # accepts '-py27', '-py2', '27', '-2.7' and '2' # but will reject '-py3', '-py26', '26' and '3' tags = '({0})?((-py{2})|(-py{1}(?=[^0-9]))|({2})|(-{1}.{3})|({1}(?=[^0-9]))|(?=[^0-9])(?!-py))'.format(tags_pat, kw['python'][0], kw['python'], kw['python'][1]) else: tags = tags_pat # Trying libraries, from most strict match to least one for pattern in ['boost_%s%s%s%s%s$' % (lib, toolset_pat, tags, version, ext), 'boost_%s%s%s%s$' % (lib, tags, version, ext), # Give up trying to find the right version 'boost_%s%s%s%s$' % (lib, toolset_pat, tags, ext), 'boost_%s%s%s$' % (lib, tags, ext), 'boost_%s%s$' % (lib, ext), 'boost_%s' % lib]: self.to_log('Trying pattern %s' % pattern) file = find_lib(re.compile(pattern), files) if file: libs.append(format_lib_name(file.name)) break else: self.end_msg('lib %s not found in %s' % (lib, path.abspath())) self.fatal('The configuration failed') return libs return path.abspath(), match_libs(kw.get('lib'), False), match_libs(kw.get('stlib'), True) @conf def _check_pthread_flag(self, *k, **kw): ''' Computes which flags should be added to CXXFLAGS and LINKFLAGS to compile in multi-threading mode Yes, we *need* to put the -pthread thing in CPPFLAGS because with GCC3, boost/thread.hpp will trigger a #error if -pthread isn't used: boost/config/requires_threads.hpp:47:5: #error "Compiler threading support is not turned on. Please set the correct command line options for threading: -pthread (Linux), -pthreads (Solaris) or -mthreads (Mingw32)" Based on _BOOST_PTHREAD_FLAG(): https://github.com/tsuna/boost.m4/blob/master/build-aux/boost.m4 ''' var = kw.get('uselib_store', 'BOOST') self.start_msg('Checking the flags needed to use pthreads') # The ordering *is* (sometimes) important. Some notes on the # individual items follow: # (none): in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -lpthreads: AIX (must check this before -lpthread) # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) # -llthread: LinuxThreads port on FreeBSD (also preferred to -pthread) # -pthread: GNU Linux/GCC (kernel threads), BSD/GCC (userland threads) # -pthreads: Solaris/GCC # -mthreads: MinGW32/GCC, Lynx/GCC # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) # ... -mt is also the pthreads flag for HP/aCC # -lpthread: GNU Linux, etc. # --thread-safe: KAI C++ if Utils.unversioned_sys_platform() == "sunos": # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) 
(The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) So, # we'll just look for -pthreads and -lpthread first: boost_pthread_flags = ["-pthreads", "-lpthread", "-mt", "-pthread"] else: boost_pthread_flags = ["", "-lpthreads", "-Kthread", "-kthread", "-llthread", "-pthread", "-pthreads", "-mthreads", "-lpthread", "--thread-safe", "-mt"] for boost_pthread_flag in boost_pthread_flags: try: self.env.stash() self.env.append_value('CXXFLAGS_%s' % var, boost_pthread_flag) self.env.append_value('LINKFLAGS_%s' % var, boost_pthread_flag) self.check_cxx(code=PTHREAD_CODE, msg=None, use=var, execute=False) self.end_msg(boost_pthread_flag) return except self.errors.ConfigurationError: self.env.revert() self.end_msg('None') @conf def check_boost(self, *k, **kw): """ Initialize boost libraries to be used. Keywords: you can pass the same parameters as with the command line (without "--boost-"). Note that the command line has the priority, and should preferably be used. """ if not self.env['CXX']: self.fatal('load a c++ compiler first, conf.load("compiler_cxx")') params = { 'lib': k and k[0] or kw.get('lib'), 'stlib': kw.get('stlib') } for key, value in self.options.__dict__.items(): if not key.startswith('boost_'): continue key = key[len('boost_'):] params[key] = value and value or kw.get(key, '') var = kw.get('uselib_store', 'BOOST') self.find_program('dpkg-architecture', var='DPKG_ARCHITECTURE', mandatory=False) if self.env.DPKG_ARCHITECTURE: deb_host_multiarch = self.cmd_and_log([self.env.DPKG_ARCHITECTURE[0], '-qDEB_HOST_MULTIARCH']) BOOST_LIBS.insert(0, '/usr/lib/%s' % deb_host_multiarch.strip()) self.start_msg('Checking boost includes') self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params) versions = self.boost_get_version(inc) self.env.BOOST_VERSION = versions[0] self.env.BOOST_VERSION_NUMBER = int(versions[1]) self.end_msg("%d.%d.%d" % (int(versions[1]) / 100000, int(versions[1]) / 100 % 1000, int(versions[1]) % 100)) if Logs.verbose: Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var]) if not params['lib'] and not params['stlib']: return if 'static' in kw or 'static' in params: Logs.warn('boost: static parameter is deprecated, use stlib instead.') self.start_msg('Checking boost libs') path, libs, stlibs = self.boost_get_libs(**params) self.env['LIBPATH_%s' % var] = [path] self.env['STLIBPATH_%s' % var] = [path] self.env['LIB_%s' % var] = libs self.env['STLIB_%s' % var] = stlibs self.end_msg('ok') if Logs.verbose: Logs.pprint('CYAN', ' path : %s' % path) Logs.pprint('CYAN', ' shared libs : %s' % libs) Logs.pprint('CYAN', ' static libs : %s' % stlibs) def has_shlib(lib): return params['lib'] and lib in params['lib'] def has_stlib(lib): return params['stlib'] and lib in params['stlib'] def has_lib(lib): return has_shlib(lib) or has_stlib(lib) if has_lib('thread'): # not inside try_link to make check visible in the output self._check_pthread_flag(k, kw) def try_link(): if has_lib('system'): self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False) if has_lib('thread'): self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False) if has_lib('log'): if not has_lib('thread'): self.env['DEFINES_%s' % var] += ['BOOST_LOG_NO_THREADS'] if has_shlib('log'): self.env['DEFINES_%s' % var] += ['BOOST_LOG_DYN_LINK'] self.check_cxx(fragment=BOOST_LOG_CODE, use=var, execute=False) if params.get('linkage_autodetect', False): 
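        # Added note (not in upstream boost.py): for the MSVC toolset the autodetect branch
        # below disables auto-linking via BOOST_ALL_NO_LIB, falls back to static boost libs
        # when no DLLs are found next to them, and probes CXXFLAGS combinations such as
        # ['/MD', '/EHsc'] until a test link of the requested libraries succeeds.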
self.start_msg("Attempting to detect boost linkage flags") toolset = self.boost_get_toolset(kw.get('toolset', '')) if toolset in ('vc',): # disable auto-linking feature, causing error LNK1181 # because the code wants to be linked against self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB'] # if no dlls are present, we guess the .lib files are not stubs has_dlls = False for x in Utils.listdir(path): if x.endswith(self.env.cxxshlib_PATTERN % ''): has_dlls = True break if not has_dlls: self.env['STLIBPATH_%s' % var] = [path] self.env['STLIB_%s' % var] = libs del self.env['LIB_%s' % var] del self.env['LIBPATH_%s' % var] # we attempt to play with some known-to-work CXXFLAGS combinations for cxxflags in (['/MD', '/EHsc'], []): self.env.stash() self.env["CXXFLAGS_%s" % var] += cxxflags try: try_link() except Errors.ConfigurationError as e: self.env.revert() exc = e else: self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var])) exc = None self.env.commit() break if exc is not None: self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc) self.fatal('The configuration failed') else: self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain") self.fatal('The configuration failed') else: self.start_msg('Checking for boost linkage') try: try_link() except Errors.ConfigurationError as e: self.end_msg("Could not link against boost libraries using supplied options") self.fatal('The configuration failed') self.end_msg('ok') @feature('cxx') @after_method('apply_link') def install_boost(self): if install_boost.done or not Utils.is_win32 or not self.bld.cmd.startswith('install'): return install_boost.done = True inst_to = getattr(self, 'install_path', '${BINDIR}') for lib in self.env.LIB_BOOST: try: file = self.bld.find_file(self.env.cxxshlib_PATTERN % lib, self.env.LIBPATH_BOOST) self.add_install_files(install_to=inst_to, install_from=self.bld.root.find_node(file)) except: continue install_boost.done = False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0212092 tevent-0.11.0/third_party/waf/waflib/extras/build_file_tracker.py0000660000000000000000000000160200000000000025075 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 """ Force files to depend on the timestamps of those located in the build directory. You may want to use this to force partial rebuilds, see playground/track_output_files/ for a working example. 
Note that there is a variety of ways to implement this, one may want use timestamps on source files too for example, or one may want to hash the files in the source directory only under certain conditions (md5_tstamp tool) or to hash the file in the build directory with its timestamp """ import os from waflib import Node, Utils def get_bld_sig(self): if not self.is_bld() or self.ctx.bldnode is self.ctx.srcnode: return Utils.h_file(self.abspath()) try: # add the creation time to the signature return self.sig + str(os.stat(self.abspath()).st_mtime) except AttributeError: return None Node.Node.get_bld_sig = get_bld_sig ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/build_logs.py0000660000000000000000000000541000000000000023410 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2013 (ita) """ A system for recording all outputs to a log file. Just add the following to your wscript file:: def init(ctx): ctx.load('build_logs') """ import atexit, sys, time, os, shutil, threading from waflib import ansiterm, Logs, Context # adding the logs under the build/ directory will clash with the clean/ command try: up = os.path.dirname(Context.g_module.__file__) except AttributeError: up = '.' LOGFILE = os.path.join(up, 'logs', time.strftime('%Y_%m_%d_%H_%M.log')) wlock = threading.Lock() class log_to_file(object): def __init__(self, stream, fileobj, filename): self.stream = stream self.encoding = self.stream.encoding self.fileobj = fileobj self.filename = filename self.is_valid = True def replace_colors(self, data): for x in Logs.colors_lst.values(): if isinstance(x, str): data = data.replace(x, '') return data def write(self, data): try: wlock.acquire() self.stream.write(data) self.stream.flush() if self.is_valid: self.fileobj.write(self.replace_colors(data)) finally: wlock.release() def fileno(self): return self.stream.fileno() def flush(self): self.stream.flush() if self.is_valid: self.fileobj.flush() def isatty(self): return self.stream.isatty() def init(ctx): global LOGFILE filename = os.path.abspath(LOGFILE) try: os.makedirs(os.path.dirname(os.path.abspath(filename))) except OSError: pass if hasattr(os, 'O_NOINHERIT'): fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT) fileobj = os.fdopen(fd, 'w') else: fileobj = open(LOGFILE, 'w') old_stderr = sys.stderr # sys.stdout has already been replaced, so __stdout__ will be faster #sys.stdout = log_to_file(sys.stdout, fileobj, filename) #sys.stderr = log_to_file(sys.stderr, fileobj, filename) def wrap(stream): if stream.isatty(): return ansiterm.AnsiTerm(stream) return stream sys.stdout = log_to_file(wrap(sys.__stdout__), fileobj, filename) sys.stderr = log_to_file(wrap(sys.__stderr__), fileobj, filename) # now mess with the logging module... 
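    # Added note (not in upstream build_logs.py): logging handlers that still reference the
    # original stderr stream are re-pointed at the wrapping log_to_file object below, so
    # messages emitted through waflib.Logs are captured in the log file as well.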
for x in Logs.log.handlers: try: stream = x.stream except AttributeError: pass else: if id(stream) == id(old_stderr): x.stream = sys.stderr def exit_cleanup(): try: fileobj = sys.stdout.fileobj except AttributeError: pass else: sys.stdout.is_valid = False sys.stderr.is_valid = False fileobj.close() filename = sys.stdout.filename Logs.info('Output logged to %r', filename) # then copy the log file to "latest.log" if possible up = os.path.dirname(os.path.abspath(filename)) try: shutil.copy(filename, os.path.join(up, 'latest.log')) except OSError: # this may fail on windows due to processes spawned pass atexit.register(exit_cleanup) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/buildcopy.py0000660000000000000000000000526000000000000023262 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Calle Rosenquist, 2017 (xbreak) """ Create task that copies source files to the associated build node. This is useful to e.g. construct a complete Python package so it can be unit tested without installation. Source files to be copied can be specified either in `buildcopy_source` attribute, or `source` attribute. If both are specified `buildcopy_source` has priority. Examples:: def build(bld): bld(name = 'bar', features = 'py buildcopy', source = bld.path.ant_glob('src/bar/*.py')) bld(name = 'py baz', features = 'buildcopy', buildcopy_source = bld.path.ant_glob('src/bar/*.py') + ['src/bar/resource.txt']) """ import os, shutil from waflib import Errors, Task, TaskGen, Utils, Node, Logs @TaskGen.before_method('process_source') @TaskGen.feature('buildcopy') def make_buildcopy(self): """ Creates the buildcopy task. """ def to_src_nodes(lst): """Find file nodes only in src, TaskGen.to_nodes will not work for this since it gives preference to nodes in build. """ if isinstance(lst, Node.Node): if not lst.is_src(): raise Errors.WafError('buildcopy: node %s is not in src'%lst) if not os.path.isfile(lst.abspath()): raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%lst) return lst if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.bld.path.get_src().search_node(lst) if node: if not os.path.isfile(node.abspath()): raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%node) return node node = self.bld.path.get_src().find_node(lst) if node: if not os.path.isfile(node.abspath()): raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%node) return node raise Errors.WafError('buildcopy: File not found in src: %s'%os.path.join(*lst)) nodes = [ to_src_nodes(n) for n in getattr(self, 'buildcopy_source', getattr(self, 'source', [])) ] if not nodes: Logs.warn('buildcopy: No source files provided to buildcopy in %s (set `buildcopy_source` or `source`)', self) return node_pairs = [(n, n.get_bld()) for n in nodes] self.create_task('buildcopy', [n[0] for n in node_pairs], [n[1] for n in node_pairs], node_pairs=node_pairs) class buildcopy(Task.Task): """ Copy for each pair `n` in `node_pairs`: n[0] -> n[1]. Attribute `node_pairs` should contain a list of tuples describing source and target: node_pairs = [(in, out), ...] 
""" color = 'PINK' def keyword(self): return 'Copying' def run(self): for f,t in self.node_pairs: t.parent.mkdir() shutil.copy2(f.abspath(), t.abspath()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/c_bgxlc.py0000660000000000000000000000130200000000000022662 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de """ IBM XL Compiler for Blue Gene """ from waflib.Tools import ccroot,ar from waflib.Configure import conf from waflib.Tools import xlc # method xlc_common_flags from waflib.Tools.compiler_c import c_compiler c_compiler['linux'].append('c_bgxlc') @conf def find_bgxlc(conf): cc = conf.find_program(['bgxlc_r','bgxlc'], var='CC') conf.get_xlc_version(cc) conf.env.CC = cc conf.env.CC_NAME = 'bgxlc' def configure(conf): conf.find_bgxlc() conf.find_ar() conf.xlc_common_flags() conf.env.LINKFLAGS_cshlib = ['-G','-Wl,-bexpfull'] conf.env.LINKFLAGS_cprogram = [] conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/c_dumbpreproc.py0000660000000000000000000000317300000000000024115 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Dumb C/C++ preprocessor for finding dependencies It will look at all include files it can find after removing the comments, so the following will always add the dependency on both "a.h" and "b.h":: #include "a.h" #ifdef B #include "b.h" #endif int main() { return 0; } To use:: def configure(conf): conf.load('compiler_c') conf.load('c_dumbpreproc') """ import re from waflib.Tools import c_preproc re_inc = re.compile( '^[ \t]*(#|%:)[ \t]*(include)[ \t]*[<"](.*)[>"]\r*$', re.IGNORECASE | re.MULTILINE) def lines_includes(node): code = node.read() if c_preproc.use_trigraphs: for (a, b) in c_preproc.trig_def: code = code.split(a).join(b) code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)] parser = c_preproc.c_parser class dumb_parser(parser): def addlines(self, node): if node in self.nodes[:-1]: return self.currentnode_stack.append(node.parent) # Avoid reading the same files again try: lines = self.parse_cache[node] except KeyError: lines = self.parse_cache[node] = lines_includes(node) self.lines = lines + [(c_preproc.POPFILE, '')] + self.lines def start(self, node, env): try: self.parse_cache = node.ctx.parse_cache except AttributeError: self.parse_cache = node.ctx.parse_cache = {} self.addlines(node) while self.lines: (x, y) = self.lines.pop(0) if x == c_preproc.POPFILE: self.currentnode_stack.pop() continue self.tryfind(y, env=env) c_preproc.c_parser = dumb_parser ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/c_emscripten.py0000660000000000000000000000474000000000000023745 0ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 vi:ts=4:noexpandtab import subprocess, shlex, sys from waflib.Tools import ccroot, gcc, gxx from waflib.Configure import conf from waflib.TaskGen import after_method, feature from waflib.Tools.compiler_c import c_compiler from waflib.Tools.compiler_cxx import cxx_compiler for supported_os in ('linux', 'darwin', 'gnu', 'aix'): 
c_compiler[supported_os].append('c_emscripten') cxx_compiler[supported_os].append('c_emscripten') @conf def get_emscripten_version(conf, cc): """ Emscripten doesn't support processing '-' like clang/gcc """ dummy = conf.cachedir.parent.make_node("waf-emscripten.c") dummy.write("") cmd = cc + ['-dM', '-E', '-x', 'c', dummy.abspath()] env = conf.env.env or None try: p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out = p.communicate()[0] except Exception as e: conf.fatal('Could not determine emscripten version %r: %s' % (cmd, e)) if not isinstance(out, str): out = out.decode(sys.stdout.encoding or 'latin-1') k = {} out = out.splitlines() for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val if not ('__clang__' in k and 'EMSCRIPTEN' in k): conf.fatal('Could not determine the emscripten compiler version.') conf.env.DEST_OS = 'generic' conf.env.DEST_BINFMT = 'elf' conf.env.DEST_CPU = 'asm-js' conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) return k @conf def find_emscripten(conf): cc = conf.find_program(['emcc'], var='CC') conf.get_emscripten_version(cc) conf.env.CC = cc conf.env.CC_NAME = 'emscripten' cxx = conf.find_program(['em++'], var='CXX') conf.env.CXX = cxx conf.env.CXX_NAME = 'emscripten' conf.find_program(['emar'], var='AR') def configure(conf): conf.find_emscripten() conf.find_ar() conf.gcc_common_flags() conf.gxx_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() conf.env.ARFLAGS = ['rcs'] conf.env.cshlib_PATTERN = '%s.js' conf.env.cxxshlib_PATTERN = '%s.js' conf.env.cstlib_PATTERN = '%s.a' conf.env.cxxstlib_PATTERN = '%s.a' conf.env.cprogram_PATTERN = '%s.html' conf.env.cxxprogram_PATTERN = '%s.html' conf.env.CXX_TGT_F = ['-c', '-o', ''] conf.env.CC_TGT_F = ['-c', '-o', ''] conf.env.CXXLNK_TGT_F = ['-o', ''] conf.env.CCLNK_TGT_F = ['-o', ''] conf.env.append_value('LINKFLAGS',['-Wl,--enable-auto-import']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/c_nec.py0000660000000000000000000000337500000000000022344 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de """ NEC SX Compiler for SX vector systems """ import re from waflib import Utils from waflib.Tools import ccroot,ar from waflib.Configure import conf from waflib.Tools import xlc # method xlc_common_flags from waflib.Tools.compiler_c import c_compiler c_compiler['linux'].append('c_nec') @conf def find_sxc(conf): cc = conf.find_program(['sxcc'], var='CC') conf.get_sxc_version(cc) conf.env.CC = cc conf.env.CC_NAME = 'sxcc' @conf def get_sxc_version(conf, fc): version_re = re.compile(r"C\+\+/SX\s*Version\s*(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-V'] p = Utils.subprocess.Popen(cmd, stdin=False, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE, env=None) out, err = p.communicate() if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the NEC C compiler version.') k = match.groupdict() conf.env['C_VERSION'] = (k['major'], k['minor']) @conf def sxc_common_flags(conf): v=conf.env v['CC_SRC_F']=[] v['CC_TGT_F']=['-c','-o'] if not v['LINK_CC']: v['LINK_CC']=v['CC'] v['CCLNK_SRC_F']=[] v['CCLNK_TGT_F']=['-o'] v['CPPPATH_ST']='-I%s' v['DEFINES_ST']='-D%s' v['LIB_ST']='-l%s' v['LIBPATH_ST']='-L%s' v['STLIB_ST']='-l%s' v['STLIBPATH_ST']='-L%s' v['RPATH_ST']='' v['SONAME_ST']=[] v['SHLIB_MARKER']=[] v['STLIB_MARKER']=[] v['LINKFLAGS_cprogram']=[''] v['cprogram_PATTERN']='%s' v['CFLAGS_cshlib']=['-fPIC'] v['LINKFLAGS_cshlib']=[''] v['cshlib_PATTERN']='lib%s.so' v['LINKFLAGS_cstlib']=[] v['cstlib_PATTERN']='lib%s.a' def configure(conf): conf.find_sxc() conf.find_program('sxar',VAR='AR') conf.sxc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/cabal.py0000660000000000000000000001205500000000000022332 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Anton Feldmann, 2012 # "Base for cabal" from waflib import Task, Utils from waflib.TaskGen import extension from waflib.Utils import threading from shutil import rmtree lock = threading.Lock() registering = False def configure(self): self.find_program('cabal', var='CABAL') self.find_program('ghc-pkg', var='GHCPKG') pkgconfd = self.bldnode.abspath() + '/package.conf.d' self.env.PREFIX = self.bldnode.abspath() + '/dist' self.env.PKGCONFD = pkgconfd if self.root.find_node(pkgconfd + '/package.cache'): self.msg('Using existing package database', pkgconfd, color='CYAN') else: pkgdir = self.root.find_dir(pkgconfd) if pkgdir: self.msg('Deleting corrupt package database', pkgdir.abspath(), color ='RED') rmtree(pkgdir.abspath()) pkgdir = None self.cmd_and_log(self.env.GHCPKG + ['init', pkgconfd]) self.msg('Created package database', pkgconfd, color = 'YELLOW' if pkgdir else 'GREEN') @extension('.cabal') def process_cabal(self, node): out_dir_node = self.bld.root.find_dir(self.bld.out_dir) package_node = node.change_ext('.package') package_node = out_dir_node.find_or_declare(package_node.name) build_node = node.parent.get_bld() build_path = build_node.abspath() config_node = build_node.find_or_declare('setup-config') inplace_node = build_node.find_or_declare('package.conf.inplace') config_task = self.create_task('cabal_configure', node) config_task.cwd = node.parent.abspath() config_task.depends_on = getattr(self, 'depends_on', '') config_task.build_path = build_path config_task.set_outputs(config_node) build_task = 
self.create_task('cabal_build', config_node) build_task.cwd = node.parent.abspath() build_task.build_path = build_path build_task.set_outputs(inplace_node) copy_task = self.create_task('cabal_copy', inplace_node) copy_task.cwd = node.parent.abspath() copy_task.depends_on = getattr(self, 'depends_on', '') copy_task.build_path = build_path last_task = copy_task task_list = [config_task, build_task, copy_task] if (getattr(self, 'register', False)): register_task = self.create_task('cabal_register', inplace_node) register_task.cwd = node.parent.abspath() register_task.set_run_after(copy_task) register_task.build_path = build_path pkgreg_task = self.create_task('ghcpkg_register', inplace_node) pkgreg_task.cwd = node.parent.abspath() pkgreg_task.set_run_after(register_task) pkgreg_task.build_path = build_path last_task = pkgreg_task task_list += [register_task, pkgreg_task] touch_task = self.create_task('cabal_touch', inplace_node) touch_task.set_run_after(last_task) touch_task.set_outputs(package_node) touch_task.build_path = build_path task_list += [touch_task] return task_list def get_all_src_deps(node): hs_deps = node.ant_glob('**/*.hs') hsc_deps = node.ant_glob('**/*.hsc') lhs_deps = node.ant_glob('**/*.lhs') c_deps = node.ant_glob('**/*.c') cpp_deps = node.ant_glob('**/*.cpp') proto_deps = node.ant_glob('**/*.proto') return sum([hs_deps, hsc_deps, lhs_deps, c_deps, cpp_deps, proto_deps], []) class Cabal(Task.Task): def scan(self): return (get_all_src_deps(self.generator.path), ()) class cabal_configure(Cabal): run_str = '${CABAL} configure -v0 --prefix=${PREFIX} --global --user --package-db=${PKGCONFD} --builddir=${tsk.build_path}' shell = True def scan(self): out_node = self.generator.bld.root.find_dir(self.generator.bld.out_dir) deps = [out_node.find_or_declare(dep).change_ext('.package') for dep in Utils.to_list(self.depends_on)] return (deps, ()) class cabal_build(Cabal): run_str = '${CABAL} build -v1 --builddir=${tsk.build_path}/' shell = True class cabal_copy(Cabal): run_str = '${CABAL} copy -v0 --builddir=${tsk.build_path}' shell = True class cabal_register(Cabal): run_str = '${CABAL} register -v0 --gen-pkg-config=${tsk.build_path}/pkg.config --builddir=${tsk.build_path}' shell = True class ghcpkg_register(Cabal): run_str = '${GHCPKG} update -v0 --global --user --package-conf=${PKGCONFD} ${tsk.build_path}/pkg.config' shell = True def runnable_status(self): global lock, registering val = False lock.acquire() val = registering lock.release() if val: return Task.ASK_LATER ret = Task.Task.runnable_status(self) if ret == Task.RUN_ME: lock.acquire() registering = True lock.release() return ret def post_run(self): global lock, registering lock.acquire() registering = False lock.release() return Task.Task.post_run(self) class cabal_touch(Cabal): run_str = 'touch ${TGT}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/cfg_altoptions.py0000660000000000000000000000541100000000000024301 0ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Tool to extend c_config.check_cfg() __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ This tool allows to work around the absence of ``*-config`` programs on systems, by keeping the same clean configuration syntax but inferring values or permitting their modification via the options interface. 
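For illustration only (a sketch; the ``zlib`` package name and the
``/opt/zlib`` prefix are made-up examples), a package registered with
``opt.add_package_option('zlib')`` could then be configured on a system
lacking a ``*-config`` helper with::

	$ waf configure --zlib-root=/opt/zlib

in which case ``check_cfg()`` falls back to ``/opt/zlib/include`` and
``/opt/zlib/lib`` instead of invoking a ``*-config`` program.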
Note that pkg-config can also support setting ``PKG_CONFIG_PATH``, so you can put custom files in a folder containing new .pc files. This tool could also be implemented by taking advantage of this fact. Usage:: def options(opt): opt.load('c_config_alt') opt.add_package_option('package') def configure(cfg): conf.load('c_config_alt') conf.check_cfg(...) Known issues: - Behavior with different build contexts... """ import os import functools from waflib import Configure, Options, Errors def name_to_dest(x): return x.lower().replace('-', '_') def options(opt): def x(opt, param): dest = name_to_dest(param) gr = opt.get_option_group("configure options") gr.add_option('--%s-root' % dest, help="path containing include and lib subfolders for %s" \ % param, ) opt.add_package_option = functools.partial(x, opt) check_cfg_old = getattr(Configure.ConfigurationContext, 'check_cfg') @Configure.conf def check_cfg(conf, *k, **kw): if k: lst = k[0].split() kw['package'] = lst[0] kw['args'] = ' '.join(lst[1:]) if not 'package' in kw: return check_cfg_old(conf, **kw) package = kw['package'] package_lo = name_to_dest(package) package_hi = package.upper().replace('-', '_') # TODO FIXME package_hi = kw.get('uselib_store', package_hi) def check_folder(path, name): try: assert os.path.isdir(path) except AssertionError: raise Errors.ConfigurationError( "%s_%s (%s) is not a folder!" \ % (package_lo, name, path)) return path root = getattr(Options.options, '%s_root' % package_lo, None) if root is None: return check_cfg_old(conf, **kw) else: def add_manual_var(k, v): conf.start_msg('Adding for %s a manual var' % (package)) conf.env["%s_%s" % (k, package_hi)] = v conf.end_msg("%s = %s" % (k, v)) check_folder(root, 'root') pkg_inc = check_folder(os.path.join(root, "include"), 'inc') add_manual_var('INCLUDES', [pkg_inc]) pkg_lib = check_folder(os.path.join(root, "lib"), 'libpath') add_manual_var('LIBPATH', [pkg_lib]) add_manual_var('LIB', [package]) for x in kw.get('manual_deps', []): for k, v in sorted(conf.env.get_merged_dict().items()): if k.endswith('_%s' % x): k = k.replace('_%s' % x, '') conf.start_msg('Adding for %s a manual dep' \ %(package)) conf.env["%s_%s" % (k, package_hi)] += v conf.end_msg('%s += %s' % (k, v)) return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/extras/clang_compilation_database.py0000660000000000000000000000630300000000000026575 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Christoph Koke, 2013 # Alibek Omarov, 2019 """ Writes the c and cpp compile commands into build/compile_commands.json see http://clang.llvm.org/docs/JSONCompilationDatabase.html Usage: Load this tool in `options` to be able to generate database by request in command-line and before build: $ waf clangdb def options(opt): opt.load('clang_compilation_database') Otherwise, load only in `configure` to generate it always before build. def configure(conf): conf.load('compiler_cxx') ... 
conf.load('clang_compilation_database') """ from waflib import Logs, TaskGen, Task, Build, Scripting Task.Task.keep_last_cmd = True @TaskGen.feature('c', 'cxx') @TaskGen.after_method('process_use') def collect_compilation_db_tasks(self): "Add a compilation database entry for compiled tasks" if not isinstance(self.bld, ClangDbContext): return tup = tuple(y for y in [Task.classes.get(x) for x in ('c', 'cxx')] if y) for task in getattr(self, 'compiled_tasks', []): if isinstance(task, tup): self.bld.clang_compilation_database_tasks.append(task) class ClangDbContext(Build.BuildContext): '''generates compile_commands.json by request''' cmd = 'clangdb' clang_compilation_database_tasks = [] def write_compilation_database(self): """ Write the clang compilation database as JSON """ database_file = self.bldnode.make_node('compile_commands.json') Logs.info('Build commands will be stored in %s', database_file.path_from(self.path)) try: root = database_file.read_json() except IOError: root = [] clang_db = dict((x['file'], x) for x in root) for task in self.clang_compilation_database_tasks: try: cmd = task.last_cmd except AttributeError: continue f_node = task.inputs[0] filename = f_node.path_from(task.get_cwd()) entry = { "directory": task.get_cwd().abspath(), "arguments": cmd, "file": filename, } clang_db[filename] = entry root = list(clang_db.values()) database_file.write_json(root) def execute(self): """ Build dry run """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) self.pre_build() # we need only to generate last_cmd, so override # exec_command temporarily def exec_command(self, *k, **kw): return 0 for g in self.groups: for tg in g: try: f = tg.post except AttributeError: pass else: f() if isinstance(tg, Task.Task): lst = [tg] else: lst = tg.tasks for tsk in lst: tup = tuple(y for y in [Task.classes.get(x) for x in ('c', 'cxx')] if y) if isinstance(tsk, tup): old_exec = tsk.exec_command tsk.exec_command = exec_command tsk.run() tsk.exec_command = old_exec self.write_compilation_database() EXECUTE_PATCHED = False def patch_execute(): global EXECUTE_PATCHED if EXECUTE_PATCHED: return def new_execute_build(self): """ Invoke clangdb command before build """ if self.cmd.startswith('build'): Scripting.run_command('clangdb') old_execute_build(self) old_execute_build = getattr(Build.BuildContext, 'execute_build', None) setattr(Build.BuildContext, 'execute_build', new_execute_build) EXECUTE_PATCHED = True patch_execute() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/clang_cross.py0000660000000000000000000000476600000000000023577 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Krzysztof Kosiński 2014 # DragoonX6 2018 """ Detect the Clang C compiler This version is an attempt at supporting the -target and -sysroot flag of Clang. 
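A minimal usage sketch (an illustration only; the target triple and sysroot
values are placeholders to adapt to the actual cross toolchain)::

	def options(opt):
		opt.load('clang_cross')

	def configure(conf):
		conf.load('clang_cross')

	$ waf configure --clang-target-triple=x86_64-pc-linux-gnu --clang-sysroot=/path/to/sysroot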
""" from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf import waflib.Context import waflib.extras.clang_cross_common def options(opt): """ Target triplet for clang:: $ waf configure --clang-target-triple=x86_64-pc-linux-gnu """ cc_compiler_opts = opt.add_option_group('Configuration options') cc_compiler_opts.add_option('--clang-target-triple', default=None, help='Target triple for clang', dest='clang_target_triple') cc_compiler_opts.add_option('--clang-sysroot', default=None, help='Sysroot for clang', dest='clang_sysroot') @conf def find_clang(conf): """ Finds the program clang and executes it to ensure it really is clang """ import os cc = conf.find_program('clang', var='CC') if conf.options.clang_target_triple != None: conf.env.append_value('CC', ['-target', conf.options.clang_target_triple]) if conf.options.clang_sysroot != None: sysroot = str() if os.path.isabs(conf.options.clang_sysroot): sysroot = conf.options.clang_sysroot else: sysroot = os.path.normpath(os.path.join(os.getcwd(), conf.options.clang_sysroot)) conf.env.append_value('CC', ['--sysroot', sysroot]) conf.get_cc_version(cc, clang=True) conf.env.CC_NAME = 'clang' @conf def clang_modifier_x86_64_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clang_modifier_i386_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clang_modifier_x86_64_windows_msvc(conf): conf.clang_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clang_modifier_x86_64_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() @conf def clang_modifier_i386_windows_msvc(conf): conf.clang_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clang_modifier_i386_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() def configure(conf): conf.find_clang() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gcc_common_flags() # Allow the user to provide flags for the target platform. conf.gcc_modifier_platform() # And allow more fine grained control based on the compiler's triplet. conf.clang_modifier_target_triple() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/clang_cross_common.py0000660000000000000000000000654200000000000025141 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # DragoonX6 2018 """ Common routines for cross_clang.py and cross_clangxx.py """ from waflib.Configure import conf import waflib.Context def normalize_target_triple(target_triple): target_triple = target_triple[:-1] normalized_triple = target_triple.replace('--', '-unknown-') if normalized_triple.startswith('-'): normalized_triple = 'unknown' + normalized_triple if normalized_triple.endswith('-'): normalized_triple += 'unknown' # Normalize MinGW builds to *arch*-w64-mingw32 if normalized_triple.endswith('windows-gnu'): normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-w64-mingw32' # Strip the vendor when doing msvc builds, since it's unused anyway. if normalized_triple.endswith('windows-msvc'): normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-windows-msvc' return normalized_triple.replace('-', '_') @conf def clang_modifier_msvc(conf): import os """ Really basic setup to use clang in msvc mode. 
We actually don't really want to do a lot, even though clang is msvc compatible in this mode, that doesn't mean we're actually using msvc. It's probably the best to leave it to the user, we can assume msvc mode if the user uses the clang-cl frontend, but this module only concerns itself with the gcc-like frontend. """ v = conf.env v.cprogram_PATTERN = '%s.exe' v.cshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.lib' v.IMPLIB_ST = '-Wl,-IMPLIB:%s' v.SHLIB_MARKER = [] v.CFLAGS_cshlib = [] v.LINKFLAGS_cshlib = ['-Wl,-DLL'] v.cstlib_PATTERN = '%s.lib' v.STLIB_MARKER = [] del(v.AR) conf.find_program(['llvm-lib', 'lib'], var='AR') v.ARFLAGS = ['-nologo'] v.AR_TGT_F = ['-out:'] # Default to the linker supplied with llvm instead of link.exe or ld v.LINK_CC = v.CC + ['-fuse-ld=lld', '-nostdlib'] v.CCLNK_TGT_F = ['-o'] v.def_PATTERN = '-Wl,-def:%s' v.LINKFLAGS = [] v.LIB_ST = '-l%s' v.LIBPATH_ST = '-Wl,-LIBPATH:%s' v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-Wl,-LIBPATH:%s' CFLAGS_CRT_COMMON = [ '-Xclang', '--dependent-lib=oldnames', '-Xclang', '-fno-rtti-data', '-D_MT' ] v.CFLAGS_CRT_MULTITHREADED = CFLAGS_CRT_COMMON + [ '-Xclang', '-flto-visibility-public-std', '-Xclang', '--dependent-lib=libcmt', ] v.CXXFLAGS_CRT_MULTITHREADED = v.CFLAGS_CRT_MULTITHREADED v.CFLAGS_CRT_MULTITHREADED_DBG = CFLAGS_CRT_COMMON + [ '-D_DEBUG', '-Xclang', '-flto-visibility-public-std', '-Xclang', '--dependent-lib=libcmtd', ] v.CXXFLAGS_CRT_MULTITHREADED_DBG = v.CFLAGS_CRT_MULTITHREADED_DBG v.CFLAGS_CRT_MULTITHREADED_DLL = CFLAGS_CRT_COMMON + [ '-D_DLL', '-Xclang', '--dependent-lib=msvcrt' ] v.CXXFLAGS_CRT_MULTITHREADED_DLL = v.CFLAGS_CRT_MULTITHREADED_DLL v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = CFLAGS_CRT_COMMON + [ '-D_DLL', '-D_DEBUG', '-Xclang', '--dependent-lib=msvcrtd', ] v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CFLAGS_CRT_MULTITHREADED_DLL_DBG @conf def clang_modifier_target_triple(conf, cpp=False): compiler = conf.env.CXX if cpp else conf.env.CC output = conf.cmd_and_log(compiler + ['-dumpmachine'], output=waflib.Context.STDOUT) modifier = ('clangxx' if cpp else 'clang') + '_modifier_' clang_modifier_func = getattr(conf, modifier + normalize_target_triple(output), None) if clang_modifier_func: clang_modifier_func() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/clangxx_cross.py0000660000000000000000000000571200000000000024147 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) # DragoonX6 2018 """ Detect the Clang++ C++ compiler This version is an attempt at supporting the -target and -sysroot flag of Clang++. 
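A minimal usage sketch (an illustration only; the placeholder triple and
sysroot should be adapted to the actual cross toolchain)::

	def options(opt):
		opt.load('clangxx_cross')

	def configure(conf):
		conf.load('clangxx_cross')

	$ waf configure --clangxx-target-triple=x86_64-pc-linux-gnu --clangxx-sysroot=/path/to/sysroot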
""" from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf import waflib.extras.clang_cross_common def options(opt): """ Target triplet for clang++:: $ waf configure --clangxx-target-triple=x86_64-pc-linux-gnu """ cxx_compiler_opts = opt.add_option_group('Configuration options') cxx_compiler_opts.add_option('--clangxx-target-triple', default=None, help='Target triple for clang++', dest='clangxx_target_triple') cxx_compiler_opts.add_option('--clangxx-sysroot', default=None, help='Sysroot for clang++', dest='clangxx_sysroot') @conf def find_clangxx(conf): """ Finds the program clang++, and executes it to ensure it really is clang++ """ import os cxx = conf.find_program('clang++', var='CXX') if conf.options.clangxx_target_triple != None: conf.env.append_value('CXX', ['-target', conf.options.clangxx_target_triple]) if conf.options.clangxx_sysroot != None: sysroot = str() if os.path.isabs(conf.options.clangxx_sysroot): sysroot = conf.options.clangxx_sysroot else: sysroot = os.path.normpath(os.path.join(os.getcwd(), conf.options.clangxx_sysroot)) conf.env.append_value('CXX', ['--sysroot', sysroot]) conf.get_cc_version(cxx, clang=True) conf.env.CXX_NAME = 'clang' @conf def clangxx_modifier_x86_64_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clangxx_modifier_i386_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clangxx_modifier_msvc(conf): v = conf.env v.cxxprogram_PATTERN = v.cprogram_PATTERN v.cxxshlib_PATTERN = v.cshlib_PATTERN v.CXXFLAGS_cxxshlib = [] v.LINKFLAGS_cxxshlib = v.LINKFLAGS_cshlib v.cxxstlib_PATTERN = v.cstlib_PATTERN v.LINK_CXX = v.CXX + ['-fuse-ld=lld', '-nostdlib'] v.CXXLNK_TGT_F = v.CCLNK_TGT_F @conf def clangxx_modifier_x86_64_windows_msvc(conf): conf.clang_modifier_msvc() conf.clangxx_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clangxx_modifier_x86_64_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() @conf def clangxx_modifier_i386_windows_msvc(conf): conf.clang_modifier_msvc() conf.clangxx_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clangxx_modifier_i386_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() def configure(conf): conf.find_clangxx() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gxx_common_flags() # Allow the user to provide flags for the target platform. conf.gxx_modifier_platform() # And allow more fine grained control based on the compiler's triplet. conf.clang_modifier_target_triple(cpp=True) conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/codelite.py0000660000000000000000000010214000000000000023053 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # CodeLite Project # Christian Klein (chrikle@berlios.de) # Created: Jan 2012 # As templete for this file I used the msvs.py # I hope this template will work proper """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ To add this tool to your project: def options(conf): opt.load('codelite') It can be a good idea to add the sync_exec tool too. To generate solution files: $ waf configure codelite To customize the outputs, provide subclasses in your wscript files: from waflib.extras import codelite class vsnode_target(codelite.vsnode_target): def get_build_command(self, props): # likely to be required return "waf.bat build" def collect_source(self): # likely to be required ... class codelite_bar(codelite.codelite_generator): def init(self): codelite.codelite_generator.init(self) self.vsnode_target = vsnode_target The codelite class re-uses the same build() function for reading the targets (task generators), you may therefore specify codelite settings on the context object: def build(bld): bld.codelite_solution_name = 'foo.workspace' bld.waf_command = 'waf.bat' bld.projects_dir = bld.srcnode.make_node('') bld.projects_dir.mkdir() ASSUMPTIONS: * a project can be either a directory or a target, project files are written only for targets that have source files * each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path """ import os, re, sys import uuid # requires python 2.5 from waflib.Build import BuildContext from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' PROJECT_TEMPLATE = r''' ${for x in project.source} ${if (project.get_key(x)=="sourcefile")} ${endif} ${endfor} ${for x in project.source} ${if (project.get_key(x)=="headerfile")} ${endif} ${endfor} $b = project.build_properties[0]} ${xml:project.get_rebuild_command(project.build_properties[0])} ${xml:project.get_clean_command(project.build_properties[0])} ${xml:project.get_build_command(project.build_properties[0])} ${xml:project.get_install_command(project.build_properties[0])} ${xml:project.get_build_and_install_command(project.build_properties[0])} ${xml:project.get_build_all_command(project.build_properties[0])} ${xml:project.get_rebuild_all_command(project.build_properties[0])} ${xml:project.get_clean_all_command(project.build_properties[0])} ${xml:project.get_build_and_install_all_command(project.build_properties[0])} None ''' SOLUTION_TEMPLATE = ''' ${for p in project.all_projects} ${endfor} ${for p in project.all_projects} ${endfor} ''' COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", 
">") %s #f = open('cmd.txt', 'w') #f.write(str(lst)) #f.close() return ''.join(lst) ''' reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P[^}]*?)\})", re.M) def compile_template(line): """ Compile a template expression into a python function (like jsps, but way shorter) """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith(('if', 'for')): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith(('endif', 'endfor')): indent -= 1 elif f.startswith(('else', 'elif')): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(%s)' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) #print(fun) return Task.funex(fun) re_blank = re.compile('(\n|\r|\\s)*\n', re.M) def rm_blank_lines(txt): txt = re_blank.sub('\r\n', txt) return txt BOM = '\xef\xbb\xbf' try: BOM = bytes(BOM, 'latin-1') # python 3 except (TypeError, NameError): pass def stealth_write(self, data, flags='wb'): try: unicode except NameError: data = data.encode('utf-8') # python 3 else: data = data.decode(sys.getfilesystemencoding(), 'replace') data = data.encode('utf-8') if self.name.endswith('.project'): data = BOM + data try: txt = self.read(flags='rb') if txt != data: raise ValueError('must write') except (IOError, ValueError): self.write(data, flags=flags) else: Logs.debug('codelite: skipping %r', self) Node.Node.stealth_write = stealth_write re_quote = re.compile("[^a-zA-Z0-9-]") def quote(s): return re_quote.sub("_", s) def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") def make_uuid(v, prefix = None): """ simple utility function """ if isinstance(v, dict): keys = list(v.keys()) keys.sort() tmp = str([(k, v[k]) for k in keys]) else: tmp = str(v) d = Utils.md5(tmp.encode()).hexdigest().upper() if prefix: d = '%s%s' % (prefix, d[8:]) gid = uuid.UUID(d, version = 4) return str(gid).upper() def diff(node, fromnode): # difference between two nodes, but with "(..)" instead of ".." c1 = node c2 = fromnode c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while id(c1) != id(c2): lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent for i in range(up): lst.append('(..)') lst.reverse() return tuple(lst) class build_property(object): pass class vsnode(object): """ Abstract class representing visual studio elements We assume that all visual studio nodes have a uuid and a parent """ def __init__(self, ctx): self.ctx = ctx # codelite context self.name = '' # string, mandatory self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) self.uuid = '' # string, mandatory self.parent = None # parent node for visual studio nesting def get_waf(self): """ Override in subclasses... 
""" return '%s/%s' % (self.ctx.srcnode.abspath(), getattr(self.ctx, 'waf_command', 'waf')) def ptype(self): """ Return a special uuid for projects written in the solution file """ pass def write(self): """ Write the project file, by default, do nothing """ pass def make_uuid(self, val): """ Alias for creating uuid values easily (the templates cannot access global variables) """ return make_uuid(val) class vsnode_vsdir(vsnode): """ Nodes representing visual studio folders (which do not match the filesystem tree!) """ VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" def __init__(self, ctx, uuid, name, vspath=''): vsnode.__init__(self, ctx) self.title = self.name = name self.uuid = uuid self.vspath = vspath or name def ptype(self): return self.VS_GUID_SOLUTIONFOLDER class vsnode_project(vsnode): """ Abstract class representing visual studio project elements A project is assumed to be writable, and has a node representing the file to write to """ VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" def ptype(self): return self.VS_GUID_VCPROJ def __init__(self, ctx, node): vsnode.__init__(self, ctx) self.path = node self.uuid = make_uuid(node.abspath()) self.name = node.name self.title = self.path.abspath() self.source = [] # list of node objects self.build_properties = [] # list of properties (nmake commands, output dir, etc) def dirs(self): """ Get the list of parent folders of the source files (header files included) for writing the filters """ lst = [] def add(x): if x.height() > self.tg.path.height() and x not in lst: lst.append(x) add(x.parent) for x in self.source: add(x.parent) return lst def write(self): Logs.debug('codelite: creating %r', self.path) #print "self.name:",self.name # first write the project file template1 = compile_template(PROJECT_TEMPLATE) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) # then write the filter #template2 = compile_template(FILTER_TEMPLATE) #filter_str = template2(self) #filter_str = rm_blank_lines(filter_str) #tmp = self.path.parent.make_node(self.path.name + '.filters') #tmp.stealth_write(filter_str) def get_key(self, node): """ required for writing the source files """ name = node.name if name.endswith(('.cpp', '.c')): return 'sourcefile' return 'headerfile' def collect_properties(self): """ Returns a list of triplet (configuration, platform, output_directory) """ ret = [] for c in self.ctx.configurations: for p in self.ctx.platforms: x = build_property() x.outdir = '' x.configuration = c x.platform = p x.preprocessor_definitions = '' x.includes_search_path = '' # can specify "deploy_dir" too ret.append(x) self.build_properties = ret def get_build_params(self, props): opt = '' return (self.get_waf(), opt) def get_build_command(self, props): return "%s build %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build %s" % self.get_build_params(props) def get_install_command(self, props): return "%s install %s" % self.get_build_params(props) def get_build_and_install_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_build_and_install_all_command(self, props): return "%s build install" % self.get_build_params(props)[0] def get_clean_all_command(self, props): return "%s clean" % self.get_build_params(props)[0] def get_build_all_command(self, props): return "%s build" % self.get_build_params(props)[0] def 
get_rebuild_all_command(self, props): return "%s clean build" % self.get_build_params(props)[0] def get_filter_name(self, node): lst = diff(node, self.tg.path) return '\\'.join(lst) or '.' class vsnode_alias(vsnode_project): def __init__(self, ctx, node, name): vsnode_project.__init__(self, ctx, node) self.name = name self.output_file = '' class vsnode_build_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) This is the only alias enabled by default """ def __init__(self, ctx, node, name='build_all_projects'): vsnode_alias.__init__(self, ctx, node, name) self.is_active = True class vsnode_install_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make install" """ def __init__(self, ctx, node, name='install_all_projects'): vsnode_alias.__init__(self, ctx, node, name) def get_build_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build install %s" % self.get_build_params(props) class vsnode_project_view(vsnode_alias): """ Fake target used to emulate a file system view """ def __init__(self, ctx, node, name='project_view'): vsnode_alias.__init__(self, ctx, node, name) self.tg = self.ctx() # fake one, cannot remove self.exclude_files = Node.exclude_regs + ''' waf-2* waf3-2*/** .waf-2* .waf3-2*/** **/*.sdf **/*.suo **/*.ncb **/%s ''' % Options.lockfile def collect_source(self): # this is likely to be slow self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) def get_build_command(self, props): params = self.get_build_params(props) + (self.ctx.cmd,) return "%s %s %s" % params def get_clean_command(self, props): return "" def get_rebuild_command(self, props): return self.get_build_command(props) class vsnode_target(vsnode_project): """ CodeLite project representing a targets (programs, libraries, etc) and bound to a task generator """ def __init__(self, ctx, tg): """ A project is more or less equivalent to a file/folder """ base = getattr(ctx, 'projects_dir', None) or tg.path node = base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node vsnode_project.__init__(self, ctx, node) self.name = quote(tg.name) self.tg = tg # task generator def get_build_params(self, props): """ Override the default to add the target name """ opt = '' if getattr(self, 'tg', None): opt += " --targets=%s" % self.tg.name return (self.get_waf(), opt) def collect_source(self): tg = self.tg source_files = tg.to_nodes(getattr(tg, 'source', [])) include_dirs = Utils.to_list(getattr(tg, 'codelite_includes', [])) include_files = [] for x in include_dirs: if isinstance(x, str): x = tg.path.find_node(x) if x: lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] include_files.extend(lst) # remove duplicates self.source.extend(list(set(source_files + include_files))) self.source.sort(key=lambda x: x.abspath()) def collect_properties(self): """ CodeLite projects are associated with platforms and configurations (for building especially) """ super(vsnode_target, self).collect_properties() for x in self.build_properties: x.outdir = self.path.parent.abspath() x.preprocessor_definitions = '' x.includes_search_path = '' try: tsk = self.tg.link_task except AttributeError: pass else: x.output_file = tsk.outputs[0].abspath() x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) x.includes_search_path = ';'.join(self.tg.env.INCPATHS) 
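# Illustrative wscript-level customization (a comment-only sketch; the
# my_* class names are hypothetical). The generator classes above can be
# swapped out from a project's wscript, e.g. to tweak the build command
# emitted for every CodeLite target:
#
#     from waflib.extras import codelite
#
#     class my_vsnode_target(codelite.vsnode_target):
#         def get_build_command(self, props):
#             return "%s build -v %s" % self.get_build_params(props)
#
#     class my_codelite_generator(codelite.codelite_generator):
#         def init(self):
#             codelite.codelite_generator.init(self)
#             self.vsnode_target = my_vsnode_target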
class codelite_generator(BuildContext): '''generates a CodeLite workspace''' cmd = 'codelite' fun = 'build' def init(self): """ Some data that needs to be present """ if not getattr(self, 'configurations', None): self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc if not getattr(self, 'platforms', None): self.platforms = ['Win32'] if not getattr(self, 'all_projects', None): self.all_projects = [] if not getattr(self, 'project_extension', None): self.project_extension = '.project' if not getattr(self, 'projects_dir', None): self.projects_dir = self.srcnode.make_node('') self.projects_dir.mkdir() # bind the classes to the object, so that subclass can provide custom generators if not getattr(self, 'vsnode_vsdir', None): self.vsnode_vsdir = vsnode_vsdir if not getattr(self, 'vsnode_target', None): self.vsnode_target = vsnode_target if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = vsnode_build_all if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = vsnode_install_all if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = vsnode_project_view self.numver = '11.00' self.vsver = '2010' def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) # user initialization self.init() # two phases for creating the solution self.collect_projects() # add project objects into "self.all_projects" self.write_files() # write the corresponding project and solution files def collect_projects(self): """ Fill the list self.all_projects with project objects Fill the list of build targets """ self.collect_targets() #self.add_aliases() #self.collect_dirs() default_project = getattr(self, 'default_project', None) def sortfun(x): if x.name == default_project: return '' return getattr(x, 'path', None) and x.path.abspath() or x.name self.all_projects.sort(key=sortfun) def write_files(self): """ Write the project and solution files from the data collected so far. 
It is unlikely that you will want to change this """ for p in self.all_projects: p.write() # and finally write the solution file node = self.get_solution_node() node.parent.mkdir() Logs.warn('Creating %r', node) #a = dir(self.root) #for b in a: # print b #print self.group_names #print "Hallo2: ",self.root.listdir() #print getattr(self, 'codelite_solution_name', None) template1 = compile_template(SOLUTION_TEMPLATE) sln_str = template1(self) sln_str = rm_blank_lines(sln_str) node.stealth_write(sln_str) def get_solution_node(self): """ The solution filename is required when writing the .vcproj files return self.solution_node and if it does not exist, make one """ try: return self.solution_node except: pass codelite_solution_name = getattr(self, 'codelite_solution_name', None) if not codelite_solution_name: codelite_solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.workspace' setattr(self, 'codelite_solution_name', codelite_solution_name) if os.path.isabs(codelite_solution_name): self.solution_node = self.root.make_node(codelite_solution_name) else: self.solution_node = self.srcnode.make_node(codelite_solution_name) return self.solution_node def project_configurations(self): """ Helper that returns all the pairs (config,platform) """ ret = [] for c in self.configurations: for p in self.platforms: ret.append((c, p)) return ret def collect_targets(self): """ Process the list of task generators """ for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue if not hasattr(tg, 'codelite_includes'): tg.codelite_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) tg.post() if not getattr(tg, 'link_task', None): continue p = self.vsnode_target(self, tg) p.collect_source() # delegate this processing p.collect_properties() self.all_projects.append(p) def add_aliases(self): """ Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 We also add an alias for "make install" (disabled by default) """ base = getattr(self, 'projects_dir', None) or self.tg.path node_project = base.make_node('build_all_projects' + self.project_extension) # Node p_build = self.vsnode_build_all(self, node_project) p_build.collect_properties() self.all_projects.append(p_build) node_project = base.make_node('install_all_projects' + self.project_extension) # Node p_install = self.vsnode_install_all(self, node_project) p_install.collect_properties() self.all_projects.append(p_install) node_project = base.make_node('project_view' + self.project_extension) # Node p_view = self.vsnode_project_view(self, node_project) p_view.collect_source() p_view.collect_properties() self.all_projects.append(p_view) n = self.vsnode_vsdir(self, make_uuid(self.srcnode.abspath() + 'build_aliases'), "build_aliases") p_build.parent = p_install.parent = p_view.parent = n self.all_projects.append(n) def collect_dirs(self): """ Create the folder structure in the CodeLite project view """ seen = {} def make_parents(proj): # look at a project, try to make a parent if getattr(proj, 'parent', None): # aliases already have parents return x = proj.iter_path if x in seen: proj.parent = seen[x] return # There is not vsnode_vsdir for x. 
# So create a project representing the folder "x" n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.abspath()), x.name) n.iter_path = x.parent self.all_projects.append(n) # recurse up to the project directory if x.height() > self.srcnode.height() + 1: make_parents(n) for p in self.all_projects[:]: # iterate over a copy of all projects if not getattr(p, 'tg', None): # but only projects that have a task generator continue # make a folder for each task generator p.iter_path = p.tg.path make_parents(p) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/color_gcc.py0000660000000000000000000000216600000000000023224 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Replaces the default formatter by one which understands GCC output and colorizes it. __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2012" import sys from waflib import Logs class ColorGCCFormatter(Logs.formatter): def __init__(self, colors): self.colors = colors Logs.formatter.__init__(self) def format(self, rec): frame = sys._getframe() while frame: func = frame.f_code.co_name if func == 'exec_command': cmd = frame.f_locals.get('cmd') if isinstance(cmd, list) and ('gcc' in cmd[0] or 'g++' in cmd[0]): lines = [] for line in rec.msg.splitlines(): if 'warning: ' in line: lines.append(self.colors.YELLOW + line) elif 'error: ' in line: lines.append(self.colors.RED + line) elif 'note: ' in line: lines.append(self.colors.CYAN + line) else: lines.append(line) rec.msg = "\n".join(lines) frame = frame.f_back return Logs.formatter.format(self, rec) def options(opt): Logs.log.handlers[0].setFormatter(ColorGCCFormatter(Logs.colors)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/color_msvc.py0000660000000000000000000000343500000000000023440 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Replaces the default formatter by one which understands MSVC output and colorizes it. 
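# Usage sketch (an assumption based on how the sibling colorizer tools are
# wired up: each installs its formatter from its options() hook); load the
# tool from the wscript's options function:
#
#     def options(opt):
#         opt.load('color_msvc')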
# Modified from color_gcc.py __author__ = __maintainer__ = "Alibek Omarov " __copyright__ = "Alibek Omarov, 2019" import sys from waflib import Logs class ColorMSVCFormatter(Logs.formatter): def __init__(self, colors): self.colors = colors Logs.formatter.__init__(self) def parseMessage(self, line, color): # Split messaage from 'disk:filepath: type: message' arr = line.split(':', 3) if len(arr) < 4: return line colored = self.colors.BOLD + arr[0] + ':' + arr[1] + ':' + self.colors.NORMAL colored += color + arr[2] + ':' + self.colors.NORMAL colored += arr[3] return colored def format(self, rec): frame = sys._getframe() while frame: func = frame.f_code.co_name if func == 'exec_command': cmd = frame.f_locals.get('cmd') if isinstance(cmd, list): # Fix file case, it may be CL.EXE or cl.exe argv0 = cmd[0].lower() if 'cl.exe' in argv0: lines = [] # This will not work with "localized" versions # of MSVC for line in rec.msg.splitlines(): if ': warning ' in line: lines.append(self.parseMessage(line, self.colors.YELLOW)) elif ': error ' in line: lines.append(self.parseMessage(line, self.colors.RED)) elif ': fatal error ' in line: lines.append(self.parseMessage(line, self.colors.RED + self.colors.BOLD)) elif ': note: ' in line: lines.append(self.parseMessage(line, self.colors.CYAN)) else: lines.append(line) rec.msg = "\n".join(lines) frame = frame.f_back return Logs.formatter.format(self, rec) def options(opt): Logs.log.handlers[0].setFormatter(ColorMSVCFormatter(Logs.colors)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/color_rvct.py0000660000000000000000000000244500000000000023446 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Replaces the default formatter by one which understands RVCT output and colorizes it. __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2012" import sys import atexit from waflib import Logs errors = [] def show_errors(): for i, e in enumerate(errors): if i > 5: break print("Error: %s" % e) atexit.register(show_errors) class RcvtFormatter(Logs.formatter): def __init__(self, colors): Logs.formatter.__init__(self) self.colors = colors def format(self, rec): frame = sys._getframe() while frame: func = frame.f_code.co_name if func == 'exec_command': cmd = frame.f_locals['cmd'] if isinstance(cmd, list) and ('armcc' in cmd[0] or 'armld' in cmd[0]): lines = [] for line in rec.msg.splitlines(): if 'Warning: ' in line: lines.append(self.colors.YELLOW + line) elif 'Error: ' in line: lines.append(self.colors.RED + line) errors.append(line) elif 'note: ' in line: lines.append(self.colors.CYAN + line) else: lines.append(line) rec.msg = "\n".join(lines) frame = frame.f_back return Logs.formatter.format(self, rec) def options(opt): Logs.log.handlers[0].setFormatter(RcvtFormatter(Logs.colors)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/compat15.py0000660000000000000000000002704500000000000022726 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) """ This file is provided to enable compatibility with waf 1.5 It was enabled by default in waf 1.6, but it is not used in waf 1.7 """ import sys from waflib import ConfigSet, Logs, Options, Scripting, Task, Build, Configure, Node, Runner, TaskGen, Utils, Errors, Context # the following is to bring some compatibility with waf 1.5 "import waflib.Configure → import Configure" sys.modules['Environment'] = ConfigSet ConfigSet.Environment = ConfigSet.ConfigSet sys.modules['Logs'] = Logs sys.modules['Options'] = Options sys.modules['Scripting'] = Scripting sys.modules['Task'] = Task sys.modules['Build'] = Build sys.modules['Configure'] = Configure sys.modules['Node'] = Node sys.modules['Runner'] = Runner sys.modules['TaskGen'] = TaskGen sys.modules['Utils'] = Utils sys.modules['Constants'] = Context Context.SRCDIR = '' Context.BLDDIR = '' from waflib.Tools import c_preproc sys.modules['preproc'] = c_preproc from waflib.Tools import c_config sys.modules['config_c'] = c_config ConfigSet.ConfigSet.copy = ConfigSet.ConfigSet.derive ConfigSet.ConfigSet.set_variant = Utils.nada Utils.pproc = Utils.subprocess Build.BuildContext.add_subdirs = Build.BuildContext.recurse Build.BuildContext.new_task_gen = Build.BuildContext.__call__ Build.BuildContext.is_install = 0 Node.Node.relpath_gen = Node.Node.path_from Utils.pproc = Utils.subprocess Utils.get_term_cols = Logs.get_term_cols def cmd_output(cmd, **kw): silent = False if 'silent' in kw: silent = kw['silent'] del(kw['silent']) if 'e' in kw: tmp = kw['e'] del(kw['e']) kw['env'] = tmp kw['shell'] = isinstance(cmd, str) kw['stdout'] = Utils.subprocess.PIPE if silent: kw['stderr'] = Utils.subprocess.PIPE try: p = Utils.subprocess.Popen(cmd, **kw) output = p.communicate()[0] except OSError as e: raise ValueError(str(e)) if p.returncode: if not silent: msg = "command execution failed: %s -> %r" % (cmd, str(output)) raise ValueError(msg) output = '' return output Utils.cmd_output = cmd_output def name_to_obj(self, s, env=None): if Logs.verbose: Logs.warn('compat: change "name_to_obj(name, env)" by "get_tgen_by_name(name)"') return self.get_tgen_by_name(s) Build.BuildContext.name_to_obj = name_to_obj def env_of_name(self, name): try: return self.all_envs[name] except KeyError: Logs.error('no such environment: '+name) return None Build.BuildContext.env_of_name = env_of_name def set_env_name(self, name, env): self.all_envs[name] = env return env Configure.ConfigurationContext.set_env_name = set_env_name def retrieve(self, name, fromenv=None): try: env = self.all_envs[name] except KeyError: env = ConfigSet.ConfigSet() self.prepare_env(env) self.all_envs[name] = env else: if fromenv: Logs.warn('The environment %s may have been configured already', name) return env Configure.ConfigurationContext.retrieve = retrieve Configure.ConfigurationContext.sub_config = Configure.ConfigurationContext.recurse Configure.ConfigurationContext.check_tool = Configure.ConfigurationContext.load Configure.conftest = Configure.conf Configure.ConfigurationError = Errors.ConfigurationError Utils.WafError = Errors.WafError Options.OptionsContext.sub_options = Options.OptionsContext.recurse Options.OptionsContext.tool_options = Context.Context.load Options.Handler = Options.OptionsContext Task.simple_task_type = Task.task_type_from_func = Task.task_factory Task.Task.classes = Task.classes def setitem(self, key, value): if key.startswith('CCFLAGS'): key = key[1:] self.table[key] = value ConfigSet.ConfigSet.__setitem__ = setitem 
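# Illustration only (nothing executed here): with the __setitem__ shim above,
# a legacy waf 1.5 style assignment such as
#
#     conf.env['CCFLAGS'] = ['-O2', '-g']
#
# is stored under the waf 1.6+ key, i.e. conf.env['CFLAGS'] == ['-O2', '-g'].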
@TaskGen.feature('d') @TaskGen.before('apply_incpaths') def old_importpaths(self): if getattr(self, 'importpaths', []): self.includes = self.importpaths from waflib import Context eld = Context.load_tool def load_tool(*k, **kw): ret = eld(*k, **kw) if 'set_options' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "set_options" to options') ret.options = ret.set_options if 'detect' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "detect" to "configure"') ret.configure = ret.detect return ret Context.load_tool = load_tool def get_curdir(self): return self.path.abspath() Context.Context.curdir = property(get_curdir, Utils.nada) def get_srcdir(self): return self.srcnode.abspath() Configure.ConfigurationContext.srcdir = property(get_srcdir, Utils.nada) def get_blddir(self): return self.bldnode.abspath() Configure.ConfigurationContext.blddir = property(get_blddir, Utils.nada) Configure.ConfigurationContext.check_message_1 = Configure.ConfigurationContext.start_msg Configure.ConfigurationContext.check_message_2 = Configure.ConfigurationContext.end_msg rev = Context.load_module def load_module(path, encoding=None): ret = rev(path, encoding) if 'set_options' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "set_options" to "options" (%r)', path) ret.options = ret.set_options if 'srcdir' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "srcdir" to "top" (%r)', path) ret.top = ret.srcdir if 'blddir' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "blddir" to "out" (%r)', path) ret.out = ret.blddir Utils.g_module = Context.g_module Options.launch_dir = Context.launch_dir return ret Context.load_module = load_module old_post = TaskGen.task_gen.post def post(self): self.features = self.to_list(self.features) if 'cc' in self.features: if Logs.verbose: Logs.warn('compat: the feature cc does not exist anymore (use "c")') self.features.remove('cc') self.features.append('c') if 'cstaticlib' in self.features: if Logs.verbose: Logs.warn('compat: the feature cstaticlib does not exist anymore (use "cstlib" or "cxxstlib")') self.features.remove('cstaticlib') self.features.append(('cxx' in self.features) and 'cxxstlib' or 'cstlib') if getattr(self, 'ccflags', None): if Logs.verbose: Logs.warn('compat: "ccflags" was renamed to "cflags"') self.cflags = self.ccflags return old_post(self) TaskGen.task_gen.post = post def waf_version(*k, **kw): Logs.warn('wrong version (waf_version was removed in waf 1.6)') Utils.waf_version = waf_version import os @TaskGen.feature('c', 'cxx', 'd') @TaskGen.before('apply_incpaths', 'propagate_uselib_vars') @TaskGen.after('apply_link', 'process_source') def apply_uselib_local(self): """ process the uselib_local attribute execute after apply_link because of the execution order set on 'link_task' """ env = self.env from waflib.Tools.ccroot import stlink_task # 1. 
the case of the libs defined in the project (visit ancestors first) # the ancestors external libraries (uselib) will be prepended self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'uselib_local', [])) get = self.bld.get_tgen_by_name seen = set() seen_uselib = set() tmp = Utils.deque(names) # consume a copy of the list of names if tmp: if Logs.verbose: Logs.warn('compat: "uselib_local" is deprecated, replace by "use"') while tmp: lib_name = tmp.popleft() # visit dependencies only once if lib_name in seen: continue y = get(lib_name) y.post() seen.add(lib_name) # object has ancestors to process (shared libraries): add them to the end of the list if getattr(y, 'uselib_local', None): for x in self.to_list(getattr(y, 'uselib_local', [])): obj = get(x) obj.post() if getattr(obj, 'link_task', None): if not isinstance(obj.link_task, stlink_task): tmp.append(x) # link task and flags if getattr(y, 'link_task', None): link_name = y.target[y.target.rfind(os.sep) + 1:] if isinstance(y.link_task, stlink_task): env.append_value('STLIB', [link_name]) else: # some linkers can link against programs env.append_value('LIB', [link_name]) # the order self.link_task.set_run_after(y.link_task) # for the recompilation self.link_task.dep_nodes += y.link_task.outputs # add the link path too tmp_path = y.link_task.outputs[0].parent.bldpath() if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', [tmp_path]) # add ancestors uselib too - but only propagate those that have no staticlib defined for v in self.to_list(getattr(y, 'uselib', [])): if v not in seen_uselib: seen_uselib.add(v) if not env['STLIB_' + v]: if not v in self.uselib: self.uselib.insert(0, v) # if the library task generator provides 'export_includes', add to the include path # the export_includes must be a list of paths relative to the other library if getattr(y, 'export_includes', None): self.includes.extend(y.to_incnodes(y.export_includes)) @TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib') @TaskGen.after('apply_link') def apply_objdeps(self): "add the .o files produced by some other object files in the same manner as uselib_local" names = getattr(self, 'add_objects', []) if not names: return names = self.to_list(names) get = self.bld.get_tgen_by_name seen = [] while names: x = names[0] # visit dependencies only once if x in seen: names = names[1:] continue # object does not exist ? y = get(x) # object has ancestors to process first ? 
update the list of names if getattr(y, 'add_objects', None): added = 0 lst = y.to_list(y.add_objects) lst.reverse() for u in lst: if u in seen: continue added = 1 names = [u]+names if added: continue # list of names modified, loop # safe to process the current object y.post() seen.append(x) for t in getattr(y, 'compiled_tasks', []): self.link_task.inputs.extend(t.outputs) @TaskGen.after('apply_link') def process_obj_files(self): if not hasattr(self, 'obj_files'): return for x in self.obj_files: node = self.path.find_resource(x) self.link_task.inputs.append(node) @TaskGen.taskgen_method def add_obj_file(self, file): """Small example on how to link object files as if they were source obj = bld.create_obj('cc') obj.add_obj_file('foo.o')""" if not hasattr(self, 'obj_files'): self.obj_files = [] if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files') self.obj_files.append(file) old_define = Configure.ConfigurationContext.__dict__['define'] @Configure.conf def define(self, key, val, quote=True, comment=''): old_define(self, key, val, quote, comment) if key.startswith('HAVE_'): self.env[key] = 1 old_undefine = Configure.ConfigurationContext.__dict__['undefine'] @Configure.conf def undefine(self, key, comment=''): old_undefine(self, key, comment) if key.startswith('HAVE_'): self.env[key] = 0 # some people might want to use export_incdirs, but it was renamed def set_incdirs(self, val): Logs.warn('compat: change "export_incdirs" by "export_includes"') self.export_includes = val TaskGen.task_gen.export_incdirs = property(None, set_incdirs) def install_dir(self, path): if not path: return [] destpath = Utils.subst_vars(path, self.env) if self.is_install > 0: Logs.info('* creating %s', destpath) Utils.check_dir(destpath) elif self.is_install < 0: Logs.info('* removing %s', destpath) try: os.remove(destpath) except OSError: pass Build.BuildContext.install_dir = install_dir # before/after names repl = {'apply_core': 'process_source', 'apply_lib_vars': 'process_source', 'apply_obj_vars': 'propagate_uselib_vars', 'exec_rule': 'process_rule' } def after(*k): k = [repl.get(key, key) for key in k] return TaskGen.after_method(*k) def before(*k): k = [repl.get(key, key) for key in k] return TaskGen.before_method(*k) TaskGen.before = before ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/cppcheck.py0000660000000000000000000004320400000000000023050 0ustar00rootroot00000000000000#! /usr/bin/env python # -*- encoding: utf-8 -*- # Michel Mooij, michel.mooij7@gmail.com """ Tool Description ================ This module provides a waf wrapper (i.e. waftool) around the C/C++ source code checking tool 'cppcheck'. See http://cppcheck.sourceforge.net/ for more information on the cppcheck tool itself. Note that many linux distributions already provide a ready to install version of cppcheck. On fedora, for instance, it can be installed using yum: 'sudo yum install cppcheck' Usage ===== In order to use this waftool simply add it to the 'options' and 'configure' functions of your main waf script as shown in the example below: def options(opt): opt.load('cppcheck', tooldir='./waftools') def configure(conf): conf.load('cppcheck') Note that example shown above assumes that the cppcheck waftool is located in the sub directory named 'waftools'. 
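A command-line sketch (assuming the wscript fragment above and a cppcheck
binary found on PATH during configuration):

	$ waf configure
	$ waf build                    # sources are analysed while building
	$ waf build --cppcheck-skip    # skip the analysis for this invocation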
When configured as shown in the example above, cppcheck will automatically perform a source code analysis on all C/C++ build tasks that have been defined in your waf build system. The example shown below for a C program will be used as input for cppcheck when building the task. def build(bld): bld.program(name='foo', src='foobar.c') The result of the source code analysis will be stored both as xml and html files in the build location for the task. Should any error be detected by cppcheck the build will be aborted and a link to the html report will be shown. By default, one index.html file is created for each task generator. A global index.html file can be obtained by setting the following variable in the configuration section: conf.env.CPPCHECK_SINGLE_HTML = False When needed source code checking by cppcheck can be disabled per task, per detected error or warning for a particular task. It can be also be disabled for all tasks. In order to exclude a task from source code checking add the skip option to the task as shown below: def build(bld): bld.program( name='foo', src='foobar.c' cppcheck_skip=True ) When needed problems detected by cppcheck may be suppressed using a file containing a list of suppression rules. The relative or absolute path to this file can be added to the build task as shown in the example below: bld.program( name='bar', src='foobar.c', cppcheck_suppress='bar.suppress' ) A cppcheck suppress file should contain one suppress rule per line. Each of these rules will be passed as an '--suppress=' argument to cppcheck. Dependencies ================ This waftool depends on the python pygments module, it is used for source code syntax highlighting when creating the html reports. see http://pygments.org/ for more information on this package. Remarks ================ The generation of the html report is originally based on the cppcheck-htmlreport.py script that comes shipped with the cppcheck tool. """ import sys import xml.etree.ElementTree as ElementTree from waflib import Task, TaskGen, Logs, Context, Options PYGMENTS_EXC_MSG= ''' The required module 'pygments' could not be found. Please install it using your platform package manager (e.g. apt-get or yum), using 'pip' or 'easy_install', see 'http://pygments.org/download/' for installation instructions. 
''' try: import pygments from pygments import formatters, lexers except ImportError as e: Logs.warn(PYGMENTS_EXC_MSG) raise e def options(opt): opt.add_option('--cppcheck-skip', dest='cppcheck_skip', default=False, action='store_true', help='do not check C/C++ sources (default=False)') opt.add_option('--cppcheck-err-resume', dest='cppcheck_err_resume', default=False, action='store_true', help='continue in case of errors (default=False)') opt.add_option('--cppcheck-bin-enable', dest='cppcheck_bin_enable', default='warning,performance,portability,style,unusedFunction', action='store', help="cppcheck option '--enable=' for binaries (default=warning,performance,portability,style,unusedFunction)") opt.add_option('--cppcheck-lib-enable', dest='cppcheck_lib_enable', default='warning,performance,portability,style', action='store', help="cppcheck option '--enable=' for libraries (default=warning,performance,portability,style)") opt.add_option('--cppcheck-std-c', dest='cppcheck_std_c', default='c99', action='store', help='cppcheck standard to use when checking C (default=c99)') opt.add_option('--cppcheck-std-cxx', dest='cppcheck_std_cxx', default='c++03', action='store', help='cppcheck standard to use when checking C++ (default=c++03)') opt.add_option('--cppcheck-check-config', dest='cppcheck_check_config', default=False, action='store_true', help='forced check for missing buildin include files, e.g. stdio.h (default=False)') opt.add_option('--cppcheck-max-configs', dest='cppcheck_max_configs', default='20', action='store', help='maximum preprocessor (--max-configs) define iterations (default=20)') opt.add_option('--cppcheck-jobs', dest='cppcheck_jobs', default='1', action='store', help='number of jobs (-j) to do the checking work (default=1)') def configure(conf): if conf.options.cppcheck_skip: conf.env.CPPCHECK_SKIP = [True] conf.env.CPPCHECK_STD_C = conf.options.cppcheck_std_c conf.env.CPPCHECK_STD_CXX = conf.options.cppcheck_std_cxx conf.env.CPPCHECK_MAX_CONFIGS = conf.options.cppcheck_max_configs conf.env.CPPCHECK_BIN_ENABLE = conf.options.cppcheck_bin_enable conf.env.CPPCHECK_LIB_ENABLE = conf.options.cppcheck_lib_enable conf.env.CPPCHECK_JOBS = conf.options.cppcheck_jobs if conf.options.cppcheck_jobs != '1' and ('unusedFunction' in conf.options.cppcheck_bin_enable or 'unusedFunction' in conf.options.cppcheck_lib_enable or 'all' in conf.options.cppcheck_bin_enable or 'all' in conf.options.cppcheck_lib_enable): Logs.warn('cppcheck: unusedFunction cannot be used with multiple threads, cppcheck will disable it automatically') conf.find_program('cppcheck', var='CPPCHECK') # set to True to get a single index.html file conf.env.CPPCHECK_SINGLE_HTML = False @TaskGen.feature('c') @TaskGen.feature('cxx') def cppcheck_execute(self): if hasattr(self.bld, 'conf'): return if len(self.env.CPPCHECK_SKIP) or Options.options.cppcheck_skip: return if getattr(self, 'cppcheck_skip', False): return task = self.create_task('cppcheck') task.cmd = _tgen_create_cmd(self) task.fatal = [] if not Options.options.cppcheck_err_resume: task.fatal.append('error') def _tgen_create_cmd(self): features = getattr(self, 'features', []) std_c = self.env.CPPCHECK_STD_C std_cxx = self.env.CPPCHECK_STD_CXX max_configs = self.env.CPPCHECK_MAX_CONFIGS bin_enable = self.env.CPPCHECK_BIN_ENABLE lib_enable = self.env.CPPCHECK_LIB_ENABLE jobs = self.env.CPPCHECK_JOBS cmd = self.env.CPPCHECK args = ['--inconclusive','--report-progress','--verbose','--xml','--xml-version=2'] args.append('--max-configs=%s' % max_configs) args.append('-j %s' % 
jobs) if 'cxx' in features: args.append('--language=c++') args.append('--std=%s' % std_cxx) else: args.append('--language=c') args.append('--std=%s' % std_c) if Options.options.cppcheck_check_config: args.append('--check-config') if set(['cprogram','cxxprogram']) & set(features): args.append('--enable=%s' % bin_enable) else: args.append('--enable=%s' % lib_enable) for src in self.to_list(getattr(self, 'source', [])): if not isinstance(src, str): src = repr(src) args.append(src) for inc in self.to_incnodes(self.to_list(getattr(self, 'includes', []))): if not isinstance(inc, str): inc = repr(inc) args.append('-I%s' % inc) for inc in self.to_incnodes(self.to_list(self.env.INCLUDES)): if not isinstance(inc, str): inc = repr(inc) args.append('-I%s' % inc) return cmd + args class cppcheck(Task.Task): quiet = True def run(self): stderr = self.generator.bld.cmd_and_log(self.cmd, quiet=Context.STDERR, output=Context.STDERR) self._save_xml_report(stderr) defects = self._get_defects(stderr) index = self._create_html_report(defects) self._errors_evaluate(defects, index) return 0 def _save_xml_report(self, s): '''use cppcheck xml result string, add the command string used to invoke cppcheck and save as xml file. ''' header = '%s\n' % s.splitlines()[0] root = ElementTree.fromstring(s) cmd = ElementTree.SubElement(root.find('cppcheck'), 'cmd') cmd.text = str(self.cmd) body = ElementTree.tostring(root).decode('us-ascii') body_html_name = 'cppcheck-%s.xml' % self.generator.get_name() if self.env.CPPCHECK_SINGLE_HTML: body_html_name = 'cppcheck.xml' node = self.generator.path.get_bld().find_or_declare(body_html_name) node.write(header + body) def _get_defects(self, xml_string): '''evaluate the xml string returned by cppcheck (on sdterr) and use it to create a list of defects. 
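Each defect is returned as a plain dictionary. As an illustration only (the
field values below are invented, the keys match the attributes collected from
the cppcheck XML):

	{'id': 'nullPointer', 'severity': 'error',
	 'msg': 'Possible null pointer dereference: p',
	 'verbose': '...', 'file': 'foobar.c', 'line': '11'}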
''' defects = [] for error in ElementTree.fromstring(xml_string).iter('error'): defect = {} defect['id'] = error.get('id') defect['severity'] = error.get('severity') defect['msg'] = str(error.get('msg')).replace('<','<') defect['verbose'] = error.get('verbose') for location in error.findall('location'): defect['file'] = location.get('file') defect['line'] = str(int(location.get('line')) - 1) defects.append(defect) return defects def _create_html_report(self, defects): files, css_style_defs = self._create_html_files(defects) index = self._create_html_index(files) self._create_css_file(css_style_defs) return index def _create_html_files(self, defects): sources = {} defects = [defect for defect in defects if 'file' in defect] for defect in defects: name = defect['file'] if not name in sources: sources[name] = [defect] else: sources[name].append(defect) files = {} css_style_defs = None bpath = self.generator.path.get_bld().abspath() names = list(sources.keys()) for i in range(0,len(names)): name = names[i] if self.env.CPPCHECK_SINGLE_HTML: htmlfile = 'cppcheck/%i.html' % (i) else: htmlfile = 'cppcheck/%s%i.html' % (self.generator.get_name(),i) errors = sources[name] files[name] = { 'htmlfile': '%s/%s' % (bpath, htmlfile), 'errors': errors } css_style_defs = self._create_html_file(name, htmlfile, errors) return files, css_style_defs def _create_html_file(self, sourcefile, htmlfile, errors): name = self.generator.get_name() root = ElementTree.fromstring(CPPCHECK_HTML_FILE) title = root.find('head/title') title.text = 'cppcheck - report - %s' % name body = root.find('body') for div in body.findall('div'): if div.get('id') == 'page': page = div break for div in page.findall('div'): if div.get('id') == 'header': h1 = div.find('h1') h1.text = 'cppcheck report - %s' % name if div.get('id') == 'menu': indexlink = div.find('a') if self.env.CPPCHECK_SINGLE_HTML: indexlink.attrib['href'] = 'index.html' else: indexlink.attrib['href'] = 'index-%s.html' % name if div.get('id') == 'content': content = div srcnode = self.generator.bld.root.find_node(sourcefile) hl_lines = [e['line'] for e in errors if 'line' in e] formatter = CppcheckHtmlFormatter(linenos=True, style='colorful', hl_lines=hl_lines, lineanchors='line') formatter.errors = [e for e in errors if 'line' in e] css_style_defs = formatter.get_style_defs('.highlight') lexer = pygments.lexers.guess_lexer_for_filename(sourcefile, "") s = pygments.highlight(srcnode.read(), lexer, formatter) table = ElementTree.fromstring(s) content.append(table) s = ElementTree.tostring(root, method='html').decode('us-ascii') s = CCPCHECK_HTML_TYPE + s node = self.generator.path.get_bld().find_or_declare(htmlfile) node.write(s) return css_style_defs def _create_html_index(self, files): name = self.generator.get_name() root = ElementTree.fromstring(CPPCHECK_HTML_FILE) title = root.find('head/title') title.text = 'cppcheck - report - %s' % name body = root.find('body') for div in body.findall('div'): if div.get('id') == 'page': page = div break for div in page.findall('div'): if div.get('id') == 'header': h1 = div.find('h1') h1.text = 'cppcheck report - %s' % name if div.get('id') == 'content': content = div self._create_html_table(content, files) if div.get('id') == 'menu': indexlink = div.find('a') if self.env.CPPCHECK_SINGLE_HTML: indexlink.attrib['href'] = 'index.html' else: indexlink.attrib['href'] = 'index-%s.html' % name s = ElementTree.tostring(root, method='html').decode('us-ascii') s = CCPCHECK_HTML_TYPE + s index_html_name = 'cppcheck/index-%s.html' % name if 
self.env.CPPCHECK_SINGLE_HTML: index_html_name = 'cppcheck/index.html' node = self.generator.path.get_bld().find_or_declare(index_html_name) node.write(s) return node def _create_html_table(self, content, files): table = ElementTree.fromstring(CPPCHECK_HTML_TABLE) for name, val in files.items(): f = val['htmlfile'] s = '%s\n' % (f,name) row = ElementTree.fromstring(s) table.append(row) errors = sorted(val['errors'], key=lambda e: int(e['line']) if 'line' in e else sys.maxint) for e in errors: if not 'line' in e: s = '%s%s%s\n' % (e['id'], e['severity'], e['msg']) else: attr = '' if e['severity'] == 'error': attr = 'class="error"' s = '%s' % (f, e['line'], e['line']) s+= '%s%s%s\n' % (e['id'], e['severity'], attr, e['msg']) row = ElementTree.fromstring(s) table.append(row) content.append(table) def _create_css_file(self, css_style_defs): css = str(CPPCHECK_CSS_FILE) if css_style_defs: css = "%s\n%s\n" % (css, css_style_defs) node = self.generator.path.get_bld().find_or_declare('cppcheck/style.css') node.write(css) def _errors_evaluate(self, errors, http_index): name = self.generator.get_name() fatal = self.fatal severity = [err['severity'] for err in errors] problems = [err for err in errors if err['severity'] != 'information'] if set(fatal) & set(severity): exc = "\n" exc += "\nccpcheck detected fatal error(s) in task '%s', see report for details:" % name exc += "\n file://%r" % (http_index) exc += "\n" self.generator.bld.fatal(exc) elif len(problems): msg = "\nccpcheck detected (possible) problem(s) in task '%s', see report for details:" % name msg += "\n file://%r" % http_index msg += "\n" Logs.error(msg) class CppcheckHtmlFormatter(pygments.formatters.HtmlFormatter): errors = [] def wrap(self, source, outfile): line_no = 1 for i, t in super(CppcheckHtmlFormatter, self).wrap(source, outfile): # If this is a source code line we want to add a span tag at the end. if i == 1: for error in self.errors: if int(error['line']) == line_no: t = t.replace('\n', CPPCHECK_HTML_ERROR % error['msg']) line_no += 1 yield i, t CCPCHECK_HTML_TYPE = \ '\n' CPPCHECK_HTML_FILE = """ ]> cppcheck - report - XXX
 
""" CPPCHECK_HTML_TABLE = """
<table>
	<tr>
		<th>Line</th>
		<th>Id</th>
		<th>Severity</th>
		<th>Message</th>
	</tr>
</table>
""" CPPCHECK_HTML_ERROR = \ '<--- %s\n' CPPCHECK_CSS_FILE = """ body.body { font-family: Arial; font-size: 13px; background-color: black; padding: 0px; margin: 0px; } .error { font-family: Arial; font-size: 13px; background-color: #ffb7b7; padding: 0px; margin: 0px; } th, td { min-width: 100px; text-align: left; } #page-header { clear: both; width: 1200px; margin: 20px auto 0px auto; height: 10px; border-bottom-width: 2px; border-bottom-style: solid; border-bottom-color: #aaaaaa; } #page { width: 1160px; margin: auto; border-left-width: 2px; border-left-style: solid; border-left-color: #aaaaaa; border-right-width: 2px; border-right-style: solid; border-right-color: #aaaaaa; background-color: White; padding: 20px; } #page-footer { clear: both; width: 1200px; margin: auto; height: 10px; border-top-width: 2px; border-top-style: solid; border-top-color: #aaaaaa; } #header { width: 100%; height: 70px; background-image: url(logo.png); background-repeat: no-repeat; background-position: left top; border-bottom-style: solid; border-bottom-width: thin; border-bottom-color: #aaaaaa; } #menu { margin-top: 5px; text-align: left; float: left; width: 100px; height: 300px; } #menu > a { margin-left: 10px; display: block; } #content { float: left; width: 1020px; margin: 5px; padding: 0px 10px 10px 10px; border-left-style: solid; border-left-width: thin; border-left-color: #aaaaaa; } #footer { padding-bottom: 5px; padding-top: 5px; border-top-style: solid; border-top-width: thin; border-top-color: #aaaaaa; clear: both; font-size: 10px; } #footer > div { float: left; width: 33%; } """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/cpplint.py0000660000000000000000000001654200000000000022746 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # # written by Sylvain Rouquette, 2014 ''' This is an extra tool, not bundled with the default waf binary. To add the cpplint tool to the waf file: $ ./waf-light --tools=compat15,cpplint this tool also requires cpplint for python. 
If you have PIP, you can install it like this: pip install cpplint When using this tool, the wscript will look like: def options(opt): opt.load('compiler_cxx cpplint') def configure(conf): conf.load('compiler_cxx cpplint') # optional, you can also specify them on the command line conf.env.CPPLINT_FILTERS = ','.join(( '-whitespace/newline', # c++11 lambda '-readability/braces', # c++11 constructor '-whitespace/braces', # c++11 constructor '-build/storage_class', # c++11 for-range '-whitespace/blank_line', # user pref '-whitespace/labels' # user pref )) def build(bld): bld(features='cpplint', source='main.cpp', target='app') # add include files, because they aren't usually built bld(features='cpplint', source=bld.path.ant_glob('**/*.hpp')) ''' from __future__ import absolute_import import sys, re import logging from waflib import Errors, Task, TaskGen, Logs, Options, Node, Utils critical_errors = 0 CPPLINT_FORMAT = '[CPPLINT] %(filename)s:\nline %(linenum)s, severity %(confidence)s, category: %(category)s\n%(message)s\n' RE_EMACS = re.compile(r'(?P.*):(?P\d+): (?P.*) \[(?P.*)\] \[(?P\d+)\]') CPPLINT_RE = { 'waf': RE_EMACS, 'emacs': RE_EMACS, 'vs7': re.compile(r'(?P.*)\((?P\d+)\): (?P.*) \[(?P.*)\] \[(?P\d+)\]'), 'eclipse': re.compile(r'(?P.*):(?P\d+): warning: (?P.*) \[(?P.*)\] \[(?P\d+)\]'), } CPPLINT_STR = ('${CPPLINT} ' '--verbose=${CPPLINT_LEVEL} ' '--output=${CPPLINT_OUTPUT} ' '--filter=${CPPLINT_FILTERS} ' '--root=${CPPLINT_ROOT} ' '--linelength=${CPPLINT_LINE_LENGTH} ') def options(opt): opt.add_option('--cpplint-filters', type='string', default='', dest='CPPLINT_FILTERS', help='add filters to cpplint') opt.add_option('--cpplint-length', type='int', default=80, dest='CPPLINT_LINE_LENGTH', help='specify the line length (default: 80)') opt.add_option('--cpplint-level', default=1, type='int', dest='CPPLINT_LEVEL', help='specify the log level (default: 1)') opt.add_option('--cpplint-break', default=5, type='int', dest='CPPLINT_BREAK', help='break the build if error >= level (default: 5)') opt.add_option('--cpplint-root', type='string', default='', dest='CPPLINT_ROOT', help='root directory used to derive header guard') opt.add_option('--cpplint-skip', action='store_true', default=False, dest='CPPLINT_SKIP', help='skip cpplint during build') opt.add_option('--cpplint-output', type='string', default='waf', dest='CPPLINT_OUTPUT', help='select output format (waf, emacs, vs7, eclipse)') def configure(conf): try: conf.find_program('cpplint', var='CPPLINT') except Errors.ConfigurationError: conf.env.CPPLINT_SKIP = True class cpplint_formatter(Logs.formatter, object): def __init__(self, fmt): logging.Formatter.__init__(self, CPPLINT_FORMAT) self.fmt = fmt def format(self, rec): if self.fmt == 'waf': result = CPPLINT_RE[self.fmt].match(rec.msg).groupdict() rec.msg = CPPLINT_FORMAT % result if rec.levelno <= logging.INFO: rec.c1 = Logs.colors.CYAN return super(cpplint_formatter, self).format(rec) class cpplint_handler(Logs.log_handler, object): def __init__(self, stream=sys.stderr, **kw): super(cpplint_handler, self).__init__(stream, **kw) self.stream = stream def emit(self, rec): rec.stream = self.stream self.emit_override(rec) self.flush() class cpplint_wrapper(object): def __init__(self, logger, threshold, fmt): self.logger = logger self.threshold = threshold self.fmt = fmt def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if isinstance(exc_value, Utils.subprocess.CalledProcessError): messages = [m for m in exc_value.output.splitlines() if 'Done processing' not 
in m and 'Total errors found' not in m] for message in messages: self.write(message) return True def write(self, message): global critical_errors result = CPPLINT_RE[self.fmt].match(message) if not result: return level = int(result.groupdict()['confidence']) if level >= self.threshold: critical_errors += 1 if level <= 2: self.logger.info(message) elif level <= 4: self.logger.warning(message) else: self.logger.error(message) cpplint_logger = None def get_cpplint_logger(fmt): global cpplint_logger if cpplint_logger: return cpplint_logger cpplint_logger = logging.getLogger('cpplint') hdlr = cpplint_handler() hdlr.setFormatter(cpplint_formatter(fmt)) cpplint_logger.addHandler(hdlr) cpplint_logger.setLevel(logging.DEBUG) return cpplint_logger class cpplint(Task.Task): color = 'PINK' def __init__(self, *k, **kw): super(cpplint, self).__init__(*k, **kw) def run(self): global critical_errors with cpplint_wrapper(get_cpplint_logger(self.env.CPPLINT_OUTPUT), self.env.CPPLINT_BREAK, self.env.CPPLINT_OUTPUT): params = {key: str(self.env[key]) for key in self.env if 'CPPLINT_' in key} if params['CPPLINT_OUTPUT'] is 'waf': params['CPPLINT_OUTPUT'] = 'emacs' params['CPPLINT'] = self.env.get_flat('CPPLINT') cmd = Utils.subst_vars(CPPLINT_STR, params) env = self.env.env or None Utils.subprocess.check_output(cmd + self.inputs[0].abspath(), stderr=Utils.subprocess.STDOUT, env=env, shell=True) return critical_errors @TaskGen.extension('.h', '.hh', '.hpp', '.hxx') def cpplint_includes(self, node): pass @TaskGen.feature('cpplint') @TaskGen.before_method('process_source') def post_cpplint(self): if not self.env.CPPLINT_INITIALIZED: for key, value in Options.options.__dict__.items(): if not key.startswith('CPPLINT_') or self.env[key]: continue self.env[key] = value self.env.CPPLINT_INITIALIZED = True if self.env.CPPLINT_SKIP: return if not self.env.CPPLINT_OUTPUT in CPPLINT_RE: return for src in self.to_list(getattr(self, 'source', [])): if isinstance(src, Node.Node): node = src else: node = self.path.find_or_declare(src) if not node: self.bld.fatal('Could not find %r' % src) self.create_task('cpplint', node) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/cross_gnu.py0000660000000000000000000001373500000000000023300 0ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 vi:ts=4:noexpandtab # Tool to provide dedicated variables for cross-compilation __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ This tool allows to use environment variables to define cross-compilation variables intended for build variants. The variables are obtained from the environment in 3 ways: 1. By defining CHOST, they can be derived as ${CHOST}-${TOOL} 2. By defining HOST_x 3. By defining ${CHOST//-/_}_x else one can set ``cfg.env.CHOST`` in ``wscript`` before loading ``cross_gnu``. Usage: - In your build script:: def configure(cfg): ... for variant in x_variants: setenv(variant) conf.load('cross_gnu') conf.xcheck_host_var('POUET') ... - Then:: CHOST=arm-hardfloat-linux-gnueabi waf configure env arm-hardfloat-linux-gnueabi-CC="clang -..." waf configure CFLAGS=... CHOST=arm-hardfloat-linux-gnueabi HOST_CFLAGS=-g waf configure HOST_CC="clang -..." waf configure This example ``wscript`` compiles to Microchip PIC (xc16-gcc-xyz must be in PATH): .. 
code:: python from waflib import Configure #from https://gist.github.com/rpuntaie/2bddfb5d7b77db26415ee14371289971 import waf_variants variants='pc fw/variant1 fw/variant2'.split() top = "." out = "../build" PIC = '33FJ128GP804' #dsPICxxx @Configure.conf def gcc_modifier_xc16(cfg): v = cfg.env v.cprogram_PATTERN = '%s.elf' v.LINKFLAGS_cprogram = ','.join(['-Wl','','','--defsym=__MPLAB_BUILD=0','','--script=p'+PIC+'.gld', '--stack=16','--check-sections','--data-init','--pack-data','--handles','--isr','--no-gc-sections', '--fill-upper=0','--stackguard=16','--no-force-link','--smart-io']) #,'--report-mem']) v.CFLAGS_cprogram=['-mcpu='+PIC,'-omf=elf','-mlarge-code','-msmart-io=1', '-msfr-warn=off','-mno-override-inline','-finline','-Winline'] def configure(cfg): if 'fw' in cfg.variant: #firmware cfg.env.DEST_OS = 'xc16' #cfg.env.CHOST = 'xc16' #works too cfg.load('c cross_gnu') #cfg.env.CHOST becomes ['xc16'] ... else: #configure for pc SW ... def build(bld): if 'fw' in bld.variant: #firmware bld.program(source='maintst.c', target='maintst'); bld(source='maintst.elf', target='maintst.hex', rule="xc16-bin2hex ${SRC} -a -omf=elf") else: #build for pc SW ... """ import os from waflib import Utils, Configure from waflib.Tools import ccroot, gcc try: from shlex import quote except ImportError: from pipes import quote def get_chost_stuff(conf): """ Get the CHOST environment variable contents """ chost = None chost_envar = None if conf.env.CHOST: chost = conf.env.CHOST[0] chost_envar = chost.replace('-', '_') return chost, chost_envar @Configure.conf def xcheck_var(conf, name, wafname=None, cross=False): wafname = wafname or name if wafname in conf.env: value = conf.env[wafname] if isinstance(value, str): value = [value] else: envar = os.environ.get(name) if not envar: return value = Utils.to_list(envar) if envar != '' else [envar] conf.env[wafname] = value if cross: pretty = 'cross-compilation %s' % wafname else: pretty = wafname conf.msg('Will use %s' % pretty, " ".join(quote(x) for x in value)) @Configure.conf def xcheck_host_prog(conf, name, tool, wafname=None): wafname = wafname or name chost, chost_envar = get_chost_stuff(conf) specific = None if chost: specific = os.environ.get('%s_%s' % (chost_envar, name)) if specific: value = Utils.to_list(specific) conf.env[wafname] += value conf.msg('Will use cross-compilation %s from %s_%s' % (name, chost_envar, name), " ".join(quote(x) for x in value)) return else: envar = os.environ.get('HOST_%s' % name) if envar is not None: value = Utils.to_list(envar) conf.env[wafname] = value conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name), " ".join(quote(x) for x in value)) return if conf.env[wafname]: return value = None if chost: value = '%s-%s' % (chost, tool) if value: conf.env[wafname] = value conf.msg('Will use cross-compilation %s from CHOST' % wafname, value) @Configure.conf def xcheck_host_envar(conf, name, wafname=None): wafname = wafname or name chost, chost_envar = get_chost_stuff(conf) specific = None if chost: specific = os.environ.get('%s_%s' % (chost_envar, name)) if specific: value = Utils.to_list(specific) conf.env[wafname] += value conf.msg('Will use cross-compilation %s from %s_%s' \ % (name, chost_envar, name), " ".join(quote(x) for x in value)) return envar = os.environ.get('HOST_%s' % name) if envar is None: return value = Utils.to_list(envar) if envar != '' else [envar] conf.env[wafname] = value conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name), " ".join(quote(x) for x in value)) @Configure.conf def 
xcheck_host(conf): conf.xcheck_var('CHOST', cross=True) conf.env.CHOST = conf.env.CHOST or [conf.env.DEST_OS] conf.env.DEST_OS = conf.env.CHOST[0].replace('-','_') conf.xcheck_host_prog('CC', 'gcc') conf.xcheck_host_prog('CXX', 'g++') conf.xcheck_host_prog('LINK_CC', 'gcc') conf.xcheck_host_prog('LINK_CXX', 'g++') conf.xcheck_host_prog('AR', 'ar') conf.xcheck_host_prog('AS', 'as') conf.xcheck_host_prog('LD', 'ld') conf.xcheck_host_envar('CFLAGS') conf.xcheck_host_envar('CXXFLAGS') conf.xcheck_host_envar('LDFLAGS', 'LINKFLAGS') conf.xcheck_host_envar('LIB') conf.xcheck_host_envar('PKG_CONFIG_LIBDIR') conf.xcheck_host_envar('PKG_CONFIG_PATH') if not conf.env.env: conf.env.env = {} conf.env.env.update(os.environ) if conf.env.PKG_CONFIG_LIBDIR: conf.env.env['PKG_CONFIG_LIBDIR'] = conf.env.PKG_CONFIG_LIBDIR[0] if conf.env.PKG_CONFIG_PATH: conf.env.env['PKG_CONFIG_PATH'] = conf.env.PKG_CONFIG_PATH[0] def configure(conf): """ Configuration example for gcc, it will not work for g++/clang/clang++ """ conf.xcheck_host() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/cython.py0000660000000000000000000001014500000000000022572 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2015 import re from waflib import Task, Logs from waflib.TaskGen import extension cy_api_pat = re.compile(r'\s*?cdef\s*?(public|api)\w*') re_cyt = re.compile(r""" ^\s* # must begin with some whitespace characters (?:from\s+(\w+)(?:\.\w+)*\s+)? # optionally match "from foo(.baz)" and capture foo c?import\s(\w+|[*]) # require "import bar" and capture bar """, re.M | re.VERBOSE) @extension('.pyx') def add_cython_file(self, node): """ Process a *.pyx* file given in the list of source files. No additional feature is required:: def build(bld): bld(features='c cshlib pyext', source='main.c foo.pyx', target='app') """ ext = '.c' if 'cxx' in self.features: self.env.append_unique('CYTHONFLAGS', '--cplus') ext = '.cc' for x in getattr(self, 'cython_includes', []): # TODO re-use these nodes in "scan" below d = self.path.find_dir(x) if d: self.env.append_unique('CYTHONFLAGS', '-I%s' % d.abspath()) tsk = self.create_task('cython', node, node.change_ext(ext)) self.source += tsk.outputs class cython(Task.Task): run_str = '${CYTHON} ${CYTHONFLAGS} -o ${TGT[0].abspath()} ${SRC}' color = 'GREEN' vars = ['INCLUDES'] """ Rebuild whenever the INCLUDES change. The variables such as CYTHONFLAGS will be appended by the metaclass. """ ext_out = ['.h'] """ The creation of a .h file is known only after the build has begun, so it is not possible to compute a build order just by looking at the task inputs/outputs. """ def runnable_status(self): """ Perform a double-check to add the headers created by cython to the output nodes. The scanner is executed only when the cython task must be executed (optimization). 
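As an illustration of the mechanism (hypothetical file name): if scan() stored
'header:foo_api.h' in bld.raw_deps for this task, the node foo_api.h is
declared next to the .pyx input and appended to the task outputs here.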
""" ret = super(cython, self).runnable_status() if ret == Task.ASK_LATER: return ret for x in self.generator.bld.raw_deps[self.uid()]: if x.startswith('header:'): self.outputs.append(self.inputs[0].parent.find_or_declare(x.replace('header:', ''))) return super(cython, self).runnable_status() def post_run(self): for x in self.outputs: if x.name.endswith('.h'): if not x.exists(): if Logs.verbose: Logs.warn('Expected %r', x.abspath()) x.write('') return Task.Task.post_run(self) def scan(self): """ Return the dependent files (.pxd) by looking in the include folders. Put the headers to generate in the custom list "bld.raw_deps". To inspect the scanne results use:: $ waf clean build --zones=deps """ node = self.inputs[0] txt = node.read() mods = set() for m in re_cyt.finditer(txt): if m.group(1): # matches "from foo import bar" mods.add(m.group(1)) else: mods.add(m.group(2)) Logs.debug('cython: mods %r', mods) incs = getattr(self.generator, 'cython_includes', []) incs = [self.generator.path.find_dir(x) for x in incs] incs.append(node.parent) found = [] missing = [] for x in sorted(mods): for y in incs: k = y.find_resource(x + '.pxd') if k: found.append(k) break else: missing.append(x) # the cython file implicitly depends on a pxd file that might be present implicit = node.parent.find_resource(node.name[:-3] + 'pxd') if implicit: found.append(implicit) Logs.debug('cython: found %r', found) # Now the .h created - store them in bld.raw_deps for later use has_api = False has_public = False for l in txt.splitlines(): if cy_api_pat.match(l): if ' api ' in l: has_api = True if ' public ' in l: has_public = True name = node.name.replace('.pyx', '') if has_api: missing.append('header:%s_api.h' % name) if has_public: missing.append('header:%s.h' % name) return (found, missing) def options(ctx): ctx.add_option('--cython-flags', action='store', default='', help='space separated list of flags to pass to cython') def configure(ctx): if not ctx.env.CC and not ctx.env.CXX: ctx.fatal('Load a C/C++ compiler first') if not ctx.env.PYTHON: ctx.fatal('Load the python tool first!') ctx.find_program('cython', var='CYTHON') if hasattr(ctx.options, 'cython_flags'): ctx.env.CYTHONFLAGS = ctx.options.cython_flags ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/dcc.py0000660000000000000000000000357300000000000022026 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Jérôme Carretero, 2011 (zougloub) from waflib import Options from waflib.Tools import ccroot from waflib.Configure import conf @conf def find_dcc(conf): conf.find_program(['dcc'], var='CC', path_list=getattr(Options.options, 'diabbindir', "")) conf.env.CC_NAME = 'dcc' @conf def find_dld(conf): conf.find_program(['dld'], var='LINK_CC', path_list=getattr(Options.options, 'diabbindir', "")) conf.env.LINK_CC_NAME = 'dld' @conf def find_dar(conf): conf.find_program(['dar'], var='AR', path_list=getattr(Options.options, 'diabbindir', "")) conf.env.AR_NAME = 'dar' conf.env.ARFLAGS = 'rcs' @conf def find_ddump(conf): conf.find_program(['ddump'], var='DDUMP', path_list=getattr(Options.options, 'diabbindir', "")) @conf def dcc_common_flags(conf): v = conf.env v['CC_SRC_F'] = [] v['CC_TGT_F'] = ['-c', '-o'] # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = [] v['CCLNK_TGT_F'] = ['-o'] v['CPPPATH_ST'] = '-I%s' v['DEFINES_ST'] = '-D%s' v['LIB_ST'] = '-l:%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for 
adding libpaths v['STLIB_ST'] = '-l:%s' v['STLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' #v['STLIB_MARKER'] = '-Wl,-Bstatic' # program v['cprogram_PATTERN'] = '%s.elf' # static lib v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic'] v['cstlib_PATTERN'] = 'lib%s.a' def configure(conf): conf.find_dcc() conf.find_dar() conf.find_dld() conf.find_ddump() conf.dcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() def options(opt): """ Add the ``--with-diab-bindir`` command-line options. """ opt.add_option('--with-diab-bindir', type='string', dest='diabbindir', help = 'Specify alternate diab bin folder', default="") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/distnet.py0000660000000000000000000002650500000000000022747 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ waf-powered distributed network builds, with a network cache. Caching files from a server has advantages over a NFS/Samba shared folder: - builds are much faster because they use local files - builds just continue to work in case of a network glitch - permissions are much simpler to manage """ import os, urllib, tarfile, re, shutil, tempfile, sys from collections import OrderedDict from waflib import Context, Utils, Logs try: from urllib.parse import urlencode except ImportError: urlencode = urllib.urlencode def safe_urlencode(data): x = urlencode(data) try: x = x.encode('utf-8') except Exception: pass return x try: from urllib.error import URLError except ImportError: from urllib2 import URLError try: from urllib.request import Request, urlopen except ImportError: from urllib2 import Request, urlopen DISTNETCACHE = os.environ.get('DISTNETCACHE', '/tmp/distnetcache') DISTNETSERVER = os.environ.get('DISTNETSERVER', 'http://localhost:8000/cgi-bin/') TARFORMAT = 'w:bz2' TIMEOUT = 60 REQUIRES = 'requires.txt' re_com = re.compile(r'\s*#.*', re.M) def total_version_order(num): lst = num.split('.') template = '%10s' * len(lst) ret = template % tuple(lst) return ret def get_distnet_cache(): return getattr(Context.g_module, 'DISTNETCACHE', DISTNETCACHE) def get_server_url(): return getattr(Context.g_module, 'DISTNETSERVER', DISTNETSERVER) def get_download_url(): return '%s/download.py' % get_server_url() def get_upload_url(): return '%s/upload.py' % get_server_url() def get_resolve_url(): return '%s/resolve.py' % get_server_url() def send_package_name(): out = getattr(Context.g_module, 'out', 'build') pkgfile = '%s/package_to_upload.tarfile' % out return pkgfile class package(Context.Context): fun = 'package' cmd = 'package' def execute(self): try: files = self.files except AttributeError: files = self.files = [] Context.Context.execute(self) pkgfile = send_package_name() if not pkgfile in files: if not REQUIRES in files: files.append(REQUIRES) self.make_tarfile(pkgfile, files, add_to_package=False) def make_tarfile(self, filename, files, **kw): if kw.get('add_to_package', True): self.files.append(filename) with tarfile.open(filename, TARFORMAT) as tar: endname = os.path.split(filename)[-1] endname = endname.split('.')[0] + '/' for x in files: tarinfo = tar.gettarinfo(x, x) tarinfo.uid = tarinfo.gid = 0 tarinfo.uname = tarinfo.gname = 'root' tarinfo.size = os.stat(x).st_size # TODO - more archive creation options? if kw.get('bare', True): tarinfo.name = os.path.split(x)[1] else: tarinfo.name = endname + x # todo, if tuple, then.. 
Logs.debug('distnet: adding %r to %s', tarinfo.name, filename) with open(x, 'rb') as f: tar.addfile(tarinfo, f) Logs.info('Created %s', filename) class publish(Context.Context): fun = 'publish' cmd = 'publish' def execute(self): if hasattr(Context.g_module, 'publish'): Context.Context.execute(self) mod = Context.g_module rfile = getattr(self, 'rfile', send_package_name()) if not os.path.isfile(rfile): self.fatal('Create the release file with "waf release" first! %r' % rfile) fdata = Utils.readf(rfile, m='rb') data = safe_urlencode([('pkgdata', fdata), ('pkgname', mod.APPNAME), ('pkgver', mod.VERSION)]) req = Request(get_upload_url(), data) response = urlopen(req, timeout=TIMEOUT) data = response.read().strip() if sys.hexversion>0x300000f: data = data.decode('utf-8') if data != 'ok': self.fatal('Could not publish the package %r' % data) class constraint(object): def __init__(self, line=''): self.required_line = line self.info = [] line = line.strip() if not line: return lst = line.split(',') if lst: self.pkgname = lst[0] self.required_version = lst[1] for k in lst: a, b, c = k.partition('=') if a and c: self.info.append((a, c)) def __str__(self): buf = [] buf.append(self.pkgname) buf.append(self.required_version) for k in self.info: buf.append('%s=%s' % k) return ','.join(buf) def __repr__(self): return "requires %s-%s" % (self.pkgname, self.required_version) def human_display(self, pkgname, pkgver): return '%s-%s requires %s-%s' % (pkgname, pkgver, self.pkgname, self.required_version) def why(self): ret = [] for x in self.info: if x[0] == 'reason': ret.append(x[1]) return ret def add_reason(self, reason): self.info.append(('reason', reason)) def parse_constraints(text): assert(text is not None) constraints = [] text = re.sub(re_com, '', text) lines = text.splitlines() for line in lines: line = line.strip() if not line: continue constraints.append(constraint(line)) return constraints def list_package_versions(cachedir, pkgname): pkgdir = os.path.join(cachedir, pkgname) try: versions = os.listdir(pkgdir) except OSError: return [] versions.sort(key=total_version_order) versions.reverse() return versions class package_reader(Context.Context): cmd = 'solver' fun = 'solver' def __init__(self, **kw): Context.Context.__init__(self, **kw) self.myproject = getattr(Context.g_module, 'APPNAME', 'project') self.myversion = getattr(Context.g_module, 'VERSION', '1.0') self.cache_constraints = {} self.constraints = [] def compute_dependencies(self, filename=REQUIRES): text = Utils.readf(filename) data = safe_urlencode([('text', text)]) if '--offline' in sys.argv: self.constraints = self.local_resolve(text) else: req = Request(get_resolve_url(), data) try: response = urlopen(req, timeout=TIMEOUT) except URLError as e: Logs.warn('The package server is down! 
%r', e) self.constraints = self.local_resolve(text) else: ret = response.read() try: ret = ret.decode('utf-8') except Exception: pass self.trace(ret) self.constraints = parse_constraints(ret) self.check_errors() def check_errors(self): errors = False for c in self.constraints: if not c.required_version: errors = True reasons = c.why() if len(reasons) == 1: Logs.error('%s but no matching package could be found in this repository', reasons[0]) else: Logs.error('Conflicts on package %r:', c.pkgname) for r in reasons: Logs.error(' %s', r) if errors: self.fatal('The package requirements cannot be satisfied!') def load_constraints(self, pkgname, pkgver, requires=REQUIRES): try: return self.cache_constraints[(pkgname, pkgver)] except KeyError: text = Utils.readf(os.path.join(get_distnet_cache(), pkgname, pkgver, requires)) ret = parse_constraints(text) self.cache_constraints[(pkgname, pkgver)] = ret return ret def apply_constraint(self, domain, constraint): vname = constraint.required_version.replace('*', '.*') rev = re.compile(vname, re.M) ret = [x for x in domain if rev.match(x)] return ret def trace(self, *k): if getattr(self, 'debug', None): Logs.error(*k) def solve(self, packages_to_versions={}, packages_to_constraints={}, pkgname='', pkgver='', todo=[], done=[]): # breadth first search n_packages_to_versions = dict(packages_to_versions) n_packages_to_constraints = dict(packages_to_constraints) self.trace("calling solve with %r %r %r" % (packages_to_versions, todo, done)) done = done + [pkgname] constraints = self.load_constraints(pkgname, pkgver) self.trace("constraints %r" % constraints) for k in constraints: try: domain = n_packages_to_versions[k.pkgname] except KeyError: domain = list_package_versions(get_distnet_cache(), k.pkgname) self.trace("constraints?") if not k.pkgname in done: todo = todo + [k.pkgname] self.trace("domain before %s -> %s, %r" % (pkgname, k.pkgname, domain)) # apply the constraint domain = self.apply_constraint(domain, k) self.trace("domain after %s -> %s, %r" % (pkgname, k.pkgname, domain)) n_packages_to_versions[k.pkgname] = domain # then store the constraint applied constraints = list(packages_to_constraints.get(k.pkgname, [])) constraints.append((pkgname, pkgver, k)) n_packages_to_constraints[k.pkgname] = constraints if not domain: self.trace("no domain while processing constraint %r from %r %r" % (domain, pkgname, pkgver)) return (n_packages_to_versions, n_packages_to_constraints) # next package on the todo list if not todo: return (n_packages_to_versions, n_packages_to_constraints) n_pkgname = todo[0] n_pkgver = n_packages_to_versions[n_pkgname][0] tmp = dict(n_packages_to_versions) tmp[n_pkgname] = [n_pkgver] self.trace("fixed point %s" % n_pkgname) return self.solve(tmp, n_packages_to_constraints, n_pkgname, n_pkgver, todo[1:], done) def get_results(self): return '\n'.join([str(c) for c in self.constraints]) def solution_to_constraints(self, versions, constraints): solution = [] for p in versions: c = constraint() solution.append(c) c.pkgname = p if versions[p]: c.required_version = versions[p][0] else: c.required_version = '' for (from_pkgname, from_pkgver, c2) in constraints.get(p, ''): c.add_reason(c2.human_display(from_pkgname, from_pkgver)) return solution def local_resolve(self, text): self.cache_constraints[(self.myproject, self.myversion)] = parse_constraints(text) p2v = OrderedDict({self.myproject: [self.myversion]}) (versions, constraints) = self.solve(p2v, {}, self.myproject, self.myversion, []) return self.solution_to_constraints(versions, 
constraints) def download_to_file(self, pkgname, pkgver, subdir, tmp): data = safe_urlencode([('pkgname', pkgname), ('pkgver', pkgver), ('pkgfile', subdir)]) req = urlopen(get_download_url(), data, timeout=TIMEOUT) with open(tmp, 'wb') as f: while True: buf = req.read(8192) if not buf: break f.write(buf) def extract_tar(self, subdir, pkgdir, tmpfile): with tarfile.open(tmpfile) as f: temp = tempfile.mkdtemp(dir=pkgdir) try: f.extractall(temp) os.rename(temp, os.path.join(pkgdir, subdir)) finally: try: shutil.rmtree(temp) except Exception: pass def get_pkg_dir(self, pkgname, pkgver, subdir): pkgdir = os.path.join(get_distnet_cache(), pkgname, pkgver) if not os.path.isdir(pkgdir): os.makedirs(pkgdir) target = os.path.join(pkgdir, subdir) if os.path.exists(target): return target (fd, tmp) = tempfile.mkstemp(dir=pkgdir) try: os.close(fd) self.download_to_file(pkgname, pkgver, subdir, tmp) if subdir == REQUIRES: os.rename(tmp, target) else: self.extract_tar(subdir, pkgdir, tmp) finally: try: os.remove(tmp) except OSError: pass return target def __iter__(self): if not self.constraints: self.compute_dependencies() for x in self.constraints: if x.pkgname == self.myproject: continue yield x def execute(self): self.compute_dependencies() packages = package_reader() def load_tools(ctx, extra): global packages for c in packages: packages.get_pkg_dir(c.pkgname, c.required_version, extra) noarchdir = packages.get_pkg_dir(c.pkgname, c.required_version, 'noarch') for x in os.listdir(noarchdir): if x.startswith('waf_') and x.endswith('.py'): ctx.load([x.rstrip('.py')], tooldir=[noarchdir]) def options(opt): opt.add_option('--offline', action='store_true') packages.execute() load_tools(opt, REQUIRES) def configure(conf): load_tools(conf, conf.variant) def build(bld): load_tools(bld, bld.variant) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/doxygen.py0000660000000000000000000001645700000000000022757 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Thomas Nagy 2008-2010 (ita) """ Doxygen support Variables passed to bld(): * doxyfile -- the Doxyfile to use * doxy_tar -- destination archive for generated documentation (if desired) * install_path -- where to install the documentation * pars -- dictionary overriding doxygen configuration settings When using this tool, the wscript will look like: def options(opt): opt.load('doxygen') def configure(conf): conf.load('doxygen') # check conf.env.DOXYGEN, if it is mandatory def build(bld): if bld.env.DOXYGEN: bld(features="doxygen", doxyfile='Doxyfile', ...) """ import os, os.path, re from collections import OrderedDict from waflib import Task, Utils, Node from waflib.TaskGen import feature DOXY_STR = '"${DOXYGEN}" - ' DOXY_FMTS = 'html latex man rft xml'.split() DOXY_FILE_PATTERNS = '*.' + ' *.'.join(''' c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx hpp h++ idl odl cs php php3 inc m mm py f90c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx '''.split()) re_rl = re.compile('\\\\\r*\n', re.MULTILINE) re_nl = re.compile('\r*\n', re.M) def parse_doxy(txt): ''' Parses a doxygen file. Returns an ordered dictionary. We cannot return a default dictionary, as the order in which the entries are reported does matter, especially for the '@INCLUDE' lines. 
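A small illustration (hypothetical Doxyfile fragment):

	INPUT          = src
	INPUT         += include
	GENERATE_HTML  = YES

is parsed into OrderedDict([('INPUT', 'src include'), ('GENERATE_HTML', 'YES')]).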
''' tbl = OrderedDict() txt = re_rl.sub('', txt) lines = re_nl.split(txt) for x in lines: x = x.strip() if not x or x.startswith('#') or x.find('=') < 0: continue if x.find('+=') >= 0: tmp = x.split('+=') key = tmp[0].strip() if key in tbl: tbl[key] += ' ' + '+='.join(tmp[1:]).strip() else: tbl[key] = '+='.join(tmp[1:]).strip() else: tmp = x.split('=') tbl[tmp[0].strip()] = '='.join(tmp[1:]).strip() return tbl class doxygen(Task.Task): vars = ['DOXYGEN', 'DOXYFLAGS'] color = 'BLUE' ext_in = [ '.py', '.c', '.h', '.java', '.pb.cc' ] def runnable_status(self): ''' self.pars are populated in runnable_status - because this function is being run *before* both self.pars "consumers" - scan() and run() set output_dir (node) for the output ''' for x in self.run_after: if not x.hasrun: return Task.ASK_LATER if not getattr(self, 'pars', None): txt = self.inputs[0].read() self.pars = parse_doxy(txt) # Override with any parameters passed to the task generator if getattr(self.generator, 'pars', None): for k, v in self.generator.pars.items(): self.pars[k] = v if self.pars.get('OUTPUT_DIRECTORY'): # Use the path parsed from the Doxyfile as an absolute path output_node = self.inputs[0].parent.get_bld().make_node(self.pars['OUTPUT_DIRECTORY']) else: # If no OUTPUT_PATH was specified in the Doxyfile, build path from the Doxyfile name + '.doxy' output_node = self.inputs[0].parent.get_bld().make_node(self.inputs[0].name + '.doxy') output_node.mkdir() self.pars['OUTPUT_DIRECTORY'] = output_node.abspath() self.doxy_inputs = getattr(self, 'doxy_inputs', []) if not self.pars.get('INPUT'): self.doxy_inputs.append(self.inputs[0].parent) else: for i in self.pars.get('INPUT').split(): if os.path.isabs(i): node = self.generator.bld.root.find_node(i) else: node = self.inputs[0].parent.find_node(i) if not node: self.generator.bld.fatal('Could not find the doxygen input %r' % i) self.doxy_inputs.append(node) if not getattr(self, 'output_dir', None): bld = self.generator.bld # Output path is always an absolute path as it was transformed above. 
self.output_dir = bld.root.find_dir(self.pars['OUTPUT_DIRECTORY']) self.signature() ret = Task.Task.runnable_status(self) if ret == Task.SKIP_ME: # in case the files were removed self.add_install() return ret def scan(self): exclude_patterns = self.pars.get('EXCLUDE_PATTERNS','').split() exclude_patterns = [pattern.replace('*/', '**/') for pattern in exclude_patterns] file_patterns = self.pars.get('FILE_PATTERNS','').split() if not file_patterns: file_patterns = DOXY_FILE_PATTERNS.split() if self.pars.get('RECURSIVE') == 'YES': file_patterns = ["**/%s" % pattern for pattern in file_patterns] nodes = [] names = [] for node in self.doxy_inputs: if os.path.isdir(node.abspath()): for m in node.ant_glob(incl=file_patterns, excl=exclude_patterns): nodes.append(m) else: nodes.append(node) return (nodes, names) def run(self): dct = self.pars.copy() code = '\n'.join(['%s = %s' % (x, dct[x]) for x in self.pars]) code = code.encode() # for python 3 #fmt = DOXY_STR % (self.inputs[0].parent.abspath()) cmd = Utils.subst_vars(DOXY_STR, self.env) env = self.env.env or None proc = Utils.subprocess.Popen(cmd, shell=True, stdin=Utils.subprocess.PIPE, env=env, cwd=self.inputs[0].parent.abspath()) proc.communicate(code) return proc.returncode def post_run(self): nodes = self.output_dir.ant_glob('**/*', quiet=True) for x in nodes: self.generator.bld.node_sigs[x] = self.uid() self.add_install() return Task.Task.post_run(self) def add_install(self): nodes = self.output_dir.ant_glob('**/*', quiet=True) self.outputs += nodes if getattr(self.generator, 'install_path', None): if not getattr(self.generator, 'doxy_tar', None): self.generator.add_install_files(install_to=self.generator.install_path, install_from=self.outputs, postpone=False, cwd=self.output_dir, relative_trick=True) class tar(Task.Task): "quick tar creation" run_str = '${TAR} ${TAROPTS} ${TGT} ${SRC}' color = 'RED' after = ['doxygen'] def runnable_status(self): for x in getattr(self, 'input_tasks', []): if not x.hasrun: return Task.ASK_LATER if not getattr(self, 'tar_done_adding', None): # execute this only once self.tar_done_adding = True for x in getattr(self, 'input_tasks', []): self.set_inputs(x.outputs) if not self.inputs: return Task.SKIP_ME return Task.Task.runnable_status(self) def __str__(self): tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) return '%s: %s\n' % (self.__class__.__name__, tgt_str) @feature('doxygen') def process_doxy(self): if not getattr(self, 'doxyfile', None): self.bld.fatal('no doxyfile variable specified??') node = self.doxyfile if not isinstance(node, Node.Node): node = self.path.find_resource(node) if not node: self.bld.fatal('doxygen file %s not found' % self.doxyfile) # the task instance dsk = self.create_task('doxygen', node, always_run=getattr(self, 'always', False)) if getattr(self, 'doxy_tar', None): tsk = self.create_task('tar', always_run=getattr(self, 'always', False)) tsk.input_tasks = [dsk] tsk.set_outputs(self.path.find_or_declare(self.doxy_tar)) if self.doxy_tar.endswith('bz2'): tsk.env['TAROPTS'] = ['cjf'] elif self.doxy_tar.endswith('gz'): tsk.env['TAROPTS'] = ['czf'] else: tsk.env['TAROPTS'] = ['cf'] if getattr(self, 'install_path', None): self.add_install_files(install_to=self.install_path, install_from=tsk.outputs) def configure(conf): ''' Check if doxygen and tar commands are present in the system If the commands are present, then conf.env.DOXYGEN and conf.env.TAR variables will be set. Detection can be controlled by setting DOXYGEN and TAR environmental variables. 
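For example (a sketch, the paths are hypothetical):

	DOXYGEN=/opt/doxygen/bin/doxygen TAR=/usr/bin/gtar waf configure

would make the configuration use those binaries instead of the first ones
found on the PATH.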
''' conf.find_program('doxygen', var='DOXYGEN', mandatory=False) conf.find_program('tar', var='TAR', mandatory=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/dpapi.py0000660000000000000000000000556600000000000022376 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Matt Clarkson, 2012 ''' DPAPI access library (http://msdn.microsoft.com/en-us/library/ms995355.aspx) This file uses code originally created by Crusher Joe: http://article.gmane.org/gmane.comp.python.ctypes/420 And modified by Wayne Koorts: http://stackoverflow.com/questions/463832/using-dpapi-with-python ''' from ctypes import windll, byref, cdll, Structure, POINTER, c_char, c_buffer from ctypes.wintypes import DWORD from waflib.Configure import conf LocalFree = windll.kernel32.LocalFree memcpy = cdll.msvcrt.memcpy CryptProtectData = windll.crypt32.CryptProtectData CryptUnprotectData = windll.crypt32.CryptUnprotectData CRYPTPROTECT_UI_FORBIDDEN = 0x01 try: extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd'.encode('ascii') except AttributeError: extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd' class DATA_BLOB(Structure): _fields_ = [ ('cbData', DWORD), ('pbData', POINTER(c_char)) ] def get_data(blob_out): cbData = int(blob_out.cbData) pbData = blob_out.pbData buffer = c_buffer(cbData) memcpy(buffer, pbData, cbData) LocalFree(pbData) return buffer.raw @conf def dpapi_encrypt_data(self, input_bytes, entropy = extra_entropy): ''' Encrypts data and returns byte string :param input_bytes: The data to be encrypted :type input_bytes: String or Bytes :param entropy: Extra entropy to add to the encryption process (optional) :type entropy: String or Bytes ''' if not isinstance(input_bytes, bytes) or not isinstance(entropy, bytes): self.fatal('The inputs to dpapi must be bytes') buffer_in = c_buffer(input_bytes, len(input_bytes)) buffer_entropy = c_buffer(entropy, len(entropy)) blob_in = DATA_BLOB(len(input_bytes), buffer_in) blob_entropy = DATA_BLOB(len(entropy), buffer_entropy) blob_out = DATA_BLOB() if CryptProtectData(byref(blob_in), 'python_data', byref(blob_entropy), None, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)): return get_data(blob_out) else: self.fatal('Failed to decrypt data') @conf def dpapi_decrypt_data(self, encrypted_bytes, entropy = extra_entropy): ''' Decrypts data and returns byte string :param encrypted_bytes: The encrypted data :type encrypted_bytes: Bytes :param entropy: Extra entropy to add to the encryption process (optional) :type entropy: String or Bytes ''' if not isinstance(encrypted_bytes, bytes) or not isinstance(entropy, bytes): self.fatal('The inputs to dpapi must be bytes') buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes)) buffer_entropy = c_buffer(entropy, len(entropy)) blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in) blob_entropy = DATA_BLOB(len(entropy), buffer_entropy) blob_out = DATA_BLOB() if CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)): return get_data(blob_out) else: self.fatal('Failed to decrypt data') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/eclipse.py0000660000000000000000000003777600000000000022735 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Eclipse CDT 5.0 generator for Waf # Richard Quirk 2009-1011 (New BSD License) # Thomas Nagy 2011 (ported to Waf 1.6) """ Usage: def options(opt): opt.load('eclipse') $ waf configure eclipse """ import sys, os from waflib import Utils, Logs, Context, Build, TaskGen, Scripting, Errors, Node from xml.dom.minidom import Document STANDARD_INCLUDES = [ '/usr/local/include', '/usr/include' ] oe_cdt = 'org.eclipse.cdt' cdt_mk = oe_cdt + '.make.core' cdt_core = oe_cdt + '.core' cdt_bld = oe_cdt + '.build.core' extbuilder_dir = '.externalToolBuilders' extbuilder_name = 'Waf_Builder.launch' class eclipse(Build.BuildContext): cmd = 'eclipse' fun = Scripting.default_cmd def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath())) self.create_cproject(appname, pythonpath=self.env['ECLIPSE_PYTHON_PATH']) # Helper to dump the XML document content to XML with UTF-8 encoding def write_conf_to_xml(self, filename, document): self.srcnode.make_node(filename).write(document.toprettyxml(encoding='UTF-8'), flags='wb') def create_cproject(self, appname, workspace_includes=[], pythonpath=[]): """ Create the Eclipse CDT .project and .cproject files @param appname The name that will appear in the Project Explorer @param build The BuildContext object to extract includes from @param workspace_includes Optional project includes to prevent "Unresolved Inclusion" errors in the Eclipse editor @param pythonpath Optional project specific python paths """ hasc = hasjava = haspython = False source_dirs = [] cpppath = self.env['CPPPATH'] javasrcpath = [] javalibpath = [] includes = STANDARD_INCLUDES if sys.platform != 'win32': cc = self.env.CC or self.env.CXX if cc: cmd = cc + ['-xc++', '-E', '-Wp,-v', '-'] try: gccout = self.cmd_and_log(cmd, output=Context.STDERR, quiet=Context.BOTH, input='\n'.encode()).splitlines() except Errors.WafError: pass else: includes = [] for ipath in gccout: if ipath.startswith(' /'): includes.append(ipath[1:]) cpppath += includes Logs.warn('Generating Eclipse CDT project files') for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue tg.post() # Add local Python modules paths to configuration so object resolving will work in IDE # This may also contain generated files (ie. pyqt5 or protoc) that get picked from build if 'py' in tg.features: pypath = tg.path.relpath() py_installfrom = getattr(tg, 'install_from', None) if isinstance(py_installfrom, Node.Node): pypath = py_installfrom.path_from(self.root.make_node(self.top_dir)) if pypath not in pythonpath: pythonpath.append(pypath) haspython = True # Add Java source directories so object resolving works in IDE # This may also contain generated files (ie. 
protoc) that get picked from build if 'javac' in tg.features: java_src = tg.path.relpath() java_srcdir = getattr(tg.javac_task, 'srcdir', None) if java_srcdir: if isinstance(java_srcdir, Node.Node): java_srcdir = [java_srcdir] for x in Utils.to_list(java_srcdir): x = x.path_from(self.root.make_node(self.top_dir)) if x not in javasrcpath: javasrcpath.append(x) else: if java_src not in javasrcpath: javasrcpath.append(java_src) hasjava = True # Check if there are external dependencies and add them as external jar so they will be resolved by Eclipse usedlibs=getattr(tg, 'use', []) for x in Utils.to_list(usedlibs): for cl in Utils.to_list(tg.env['CLASSPATH_'+x]): if cl not in javalibpath: javalibpath.append(cl) if not getattr(tg, 'link_task', None): continue features = Utils.to_list(getattr(tg, 'features', '')) is_cc = 'c' in features or 'cxx' in features incnodes = tg.to_incnodes(tg.to_list(getattr(tg, 'includes', [])) + tg.env['INCLUDES']) for p in incnodes: path = p.path_from(self.srcnode) if (path.startswith("/")): cpppath.append(path) else: workspace_includes.append(path) if is_cc and path not in source_dirs: source_dirs.append(path) hasc = True waf_executable = os.path.abspath(sys.argv[0]) project = self.impl_create_project(sys.executable, appname, hasc, hasjava, haspython, waf_executable) self.write_conf_to_xml('.project', project) if hasc: project = self.impl_create_cproject(sys.executable, waf_executable, appname, workspace_includes, cpppath, source_dirs) self.write_conf_to_xml('.cproject', project) if haspython: project = self.impl_create_pydevproject(sys.path, pythonpath) self.write_conf_to_xml('.pydevproject', project) if hasjava: project = self.impl_create_javaproject(javasrcpath, javalibpath) self.write_conf_to_xml('.classpath', project) def impl_create_project(self, executable, appname, hasc, hasjava, haspython, waf_executable): doc = Document() projectDescription = doc.createElement('projectDescription') self.add(doc, projectDescription, 'name', appname) self.add(doc, projectDescription, 'comment') self.add(doc, projectDescription, 'projects') buildSpec = self.add(doc, projectDescription, 'buildSpec') buildCommand = self.add(doc, buildSpec, 'buildCommand') self.add(doc, buildCommand, 'triggers', 'clean,full,incremental,') arguments = self.add(doc, buildCommand, 'arguments') dictionaries = {} # If CDT is present, instruct this one to call waf as it is more flexible (separate build/clean ...) 
if hasc: self.add(doc, buildCommand, 'name', oe_cdt + '.managedbuilder.core.genmakebuilder') # the default make-style targets are overwritten by the .cproject values dictionaries = { cdt_mk + '.contents': cdt_mk + '.activeConfigSettings', cdt_mk + '.enableAutoBuild': 'false', cdt_mk + '.enableCleanBuild': 'true', cdt_mk + '.enableFullBuild': 'true', } else: # Otherwise for Java/Python an external builder tool is created that will call waf build self.add(doc, buildCommand, 'name', 'org.eclipse.ui.externaltools.ExternalToolBuilder') dictionaries = { 'LaunchConfigHandle': '/%s/%s'%(extbuilder_dir, extbuilder_name), } # The definition is in a separate directory XML file try: os.mkdir(extbuilder_dir) except OSError: pass # Ignore error if already exists # Populate here the external builder XML calling waf builder = Document() launchConfiguration = doc.createElement('launchConfiguration') launchConfiguration.setAttribute('type', 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType') self.add(doc, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'false'}) self.add(doc, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED', 'value': 'true'}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value': waf_executable}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'full,incremental,'}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': 'build'}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': '${project_loc}'}) builder.appendChild(launchConfiguration) # And write the XML to the file references before self.write_conf_to_xml('%s%s%s'%(extbuilder_dir, os.path.sep, extbuilder_name), builder) for k, v in dictionaries.items(): self.addDictionary(doc, arguments, k, v) natures = self.add(doc, projectDescription, 'natures') if hasc: nature_list = """ core.ccnature managedbuilder.core.ScannerConfigNature managedbuilder.core.managedBuildNature core.cnature """.split() for n in nature_list: self.add(doc, natures, 'nature', oe_cdt + '.' 
+ n) if haspython: self.add(doc, natures, 'nature', 'org.python.pydev.pythonNature') if hasjava: self.add(doc, natures, 'nature', 'org.eclipse.jdt.core.javanature') doc.appendChild(projectDescription) return doc def impl_create_cproject(self, executable, waf_executable, appname, workspace_includes, cpppath, source_dirs=[]): doc = Document() doc.appendChild(doc.createProcessingInstruction('fileVersion', '4.0.0')) cconf_id = cdt_core + '.default.config.1' cproject = doc.createElement('cproject') storageModule = self.add(doc, cproject, 'storageModule', {'moduleId': cdt_core + '.settings'}) cconf = self.add(doc, storageModule, 'cconfiguration', {'id':cconf_id}) storageModule = self.add(doc, cconf, 'storageModule', {'buildSystemId': oe_cdt + '.managedbuilder.core.configurationDataProvider', 'id': cconf_id, 'moduleId': cdt_core + '.settings', 'name': 'Default'}) self.add(doc, storageModule, 'externalSettings') extensions = self.add(doc, storageModule, 'extensions') extension_list = """ VCErrorParser MakeErrorParser GCCErrorParser GASErrorParser GLDErrorParser """.split() self.add(doc, extensions, 'extension', {'id': cdt_core + '.ELF', 'point':cdt_core + '.BinaryParser'}) for e in extension_list: self.add(doc, extensions, 'extension', {'id': cdt_core + '.' + e, 'point':cdt_core + '.ErrorParser'}) storageModule = self.add(doc, cconf, 'storageModule', {'moduleId': 'cdtBuildSystem', 'version': '4.0.0'}) config = self.add(doc, storageModule, 'configuration', {'artifactName': appname, 'id': cconf_id, 'name': 'Default', 'parent': cdt_bld + '.prefbase.cfg'}) folderInfo = self.add(doc, config, 'folderInfo', {'id': cconf_id+'.', 'name': '/', 'resourcePath': ''}) toolChain = self.add(doc, folderInfo, 'toolChain', {'id': cdt_bld + '.prefbase.toolchain.1', 'name': 'No ToolChain', 'resourceTypeBasedDiscovery': 'false', 'superClass': cdt_bld + '.prefbase.toolchain'}) self.add(doc, toolChain, 'targetPlatform', {'binaryParser': 'org.eclipse.cdt.core.ELF', 'id': cdt_bld + '.prefbase.toolchain.1', 'name': ''}) waf_build = '"%s" %s'%(waf_executable, eclipse.fun) waf_clean = '"%s" clean'%(waf_executable) self.add(doc, toolChain, 'builder', {'autoBuildTarget': waf_build, 'command': executable, 'enableAutoBuild': 'false', 'cleanBuildTarget': waf_clean, 'enableIncrementalBuild': 'true', 'id': cdt_bld + '.settings.default.builder.1', 'incrementalBuildTarget': waf_build, 'managedBuildOn': 'false', 'name': 'Gnu Make Builder', 'superClass': cdt_bld + '.settings.default.builder'}) tool_index = 1; for tool_name in ("Assembly", "GNU C++", "GNU C"): tool = self.add(doc, toolChain, 'tool', {'id': cdt_bld + '.settings.holder.' + str(tool_index), 'name': tool_name, 'superClass': cdt_bld + '.settings.holder'}) if cpppath or workspace_includes: incpaths = cdt_bld + '.settings.holder.incpaths' option = self.add(doc, tool, 'option', {'id': incpaths + '.' + str(tool_index), 'name': 'Include Paths', 'superClass': incpaths, 'valueType': 'includePath'}) for i in workspace_includes: self.add(doc, option, 'listOptionValue', {'builtIn': 'false', 'value': '"${workspace_loc:/%s/%s}"'%(appname, i)}) for i in cpppath: self.add(doc, option, 'listOptionValue', {'builtIn': 'false', 'value': '"%s"'%(i)}) if tool_name == "GNU C++" or tool_name == "GNU C": self.add(doc,tool,'inputType',{ 'id':'org.eclipse.cdt.build.core.settings.holder.inType.' 
+ str(tool_index), \ 'languageId':'org.eclipse.cdt.core.gcc' if tool_name == "GNU C" else 'org.eclipse.cdt.core.g++','languageName':tool_name, \ 'sourceContentType':'org.eclipse.cdt.core.cSource,org.eclipse.cdt.core.cHeader', \ 'superClass':'org.eclipse.cdt.build.core.settings.holder.inType' }) tool_index += 1 if source_dirs: sourceEntries = self.add(doc, config, 'sourceEntries') for i in source_dirs: self.add(doc, sourceEntries, 'entry', {'excluding': i, 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', 'kind': 'sourcePath', 'name': ''}) self.add(doc, sourceEntries, 'entry', { 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', 'kind': 'sourcePath', 'name': i}) storageModule = self.add(doc, cconf, 'storageModule', {'moduleId': cdt_mk + '.buildtargets'}) buildTargets = self.add(doc, storageModule, 'buildTargets') def addTargetWrap(name, runAll): return self.addTarget(doc, buildTargets, executable, name, '"%s" %s'%(waf_executable, name), runAll) addTargetWrap('configure', True) addTargetWrap('dist', False) addTargetWrap('install', False) addTargetWrap('check', False) storageModule = self.add(doc, cproject, 'storageModule', {'moduleId': 'cdtBuildSystem', 'version': '4.0.0'}) self.add(doc, storageModule, 'project', {'id': '%s.null.1'%appname, 'name': appname}) doc.appendChild(cproject) return doc def impl_create_pydevproject(self, system_path, user_path): # create a pydevproject file doc = Document() doc.appendChild(doc.createProcessingInstruction('eclipse-pydev', 'version="1.0"')) pydevproject = doc.createElement('pydev_project') prop = self.add(doc, pydevproject, 'pydev_property', 'python %d.%d'%(sys.version_info[0], sys.version_info[1])) prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_VERSION') prop = self.add(doc, pydevproject, 'pydev_property', 'Default') prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_INTERPRETER') # add waf's paths wafadmin = [p for p in system_path if p.find('wafadmin') != -1] if wafadmin: prop = self.add(doc, pydevproject, 'pydev_pathproperty', {'name':'org.python.pydev.PROJECT_EXTERNAL_SOURCE_PATH'}) for i in wafadmin: self.add(doc, prop, 'path', i) if user_path: prop = self.add(doc, pydevproject, 'pydev_pathproperty', {'name':'org.python.pydev.PROJECT_SOURCE_PATH'}) for i in user_path: self.add(doc, prop, 'path', '/${PROJECT_DIR_NAME}/'+i) doc.appendChild(pydevproject) return doc def impl_create_javaproject(self, javasrcpath, javalibpath): # create a .classpath file for java usage doc = Document() javaproject = doc.createElement('classpath') if javasrcpath: for i in javasrcpath: self.add(doc, javaproject, 'classpathentry', {'kind': 'src', 'path': i}) if javalibpath: for i in javalibpath: self.add(doc, javaproject, 'classpathentry', {'kind': 'lib', 'path': i}) self.add(doc, javaproject, 'classpathentry', {'kind': 'con', 'path': 'org.eclipse.jdt.launching.JRE_CONTAINER'}) self.add(doc, javaproject, 'classpathentry', {'kind': 'output', 'path': self.bldnode.name }) doc.appendChild(javaproject) return doc def addDictionary(self, doc, parent, k, v): dictionary = self.add(doc, parent, 'dictionary') self.add(doc, dictionary, 'key', k) self.add(doc, dictionary, 'value', v) return dictionary def addTarget(self, doc, buildTargets, executable, name, buildTarget, runAllBuilders=True): target = self.add(doc, buildTargets, 'target', {'name': name, 'path': '', 'targetID': oe_cdt + '.build.MakeTargetBuilder'}) self.add(doc, target, 'buildCommand', executable) self.add(doc, target, 'buildArguments', None) self.add(doc, target, 'buildTarget', buildTarget) self.add(doc, target, 
'stopOnError', 'true') self.add(doc, target, 'useDefaultCommand', 'false') self.add(doc, target, 'runAllBuilders', str(runAllBuilders).lower()) def add(self, doc, parent, tag, value = None): el = doc.createElement(tag) if (value): if type(value) == type(str()): el.appendChild(doc.createTextNode(value)) elif type(value) == type(dict()): self.setAttributes(el, value) parent.appendChild(el) return el def setAttributes(self, node, attrs): for k, v in attrs.items(): node.setAttribute(k, v) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/erlang.py0000660000000000000000000000667300000000000022551 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) # Przemyslaw Rzepecki, 2016 """ Erlang support """ import re from waflib import Task, TaskGen from waflib.TaskGen import feature, after_method, before_method # to load the method "to_incnodes" below from waflib.Tools import ccroot # Those flags are required by the Erlang VM to execute/evaluate code in # non-interactive mode. It is used in this tool to create Erlang modules # documentation and run unit tests. The user can pass additional arguments to the # 'erl' command with ERL_FLAGS environment variable. EXEC_NON_INTERACTIVE = ['-noshell', '-noinput', '-eval'] def configure(conf): conf.find_program('erlc', var='ERLC') conf.find_program('erl', var='ERL') conf.add_os_flags('ERLC_FLAGS') conf.add_os_flags('ERL_FLAGS') conf.env.ERLC_DEF_PATTERN = '-D%s' conf.env.ERLC_INC_PATTERN = '-I%s' @TaskGen.extension('.erl') def process_erl_node(self, node): tsk = self.create_task('erl', node, node.change_ext('.beam')) tsk.erlc_incnodes = [tsk.outputs[0].parent] + self.to_incnodes(self.includes) tsk.env.append_value('ERLC_INCPATHS', [x.abspath() for x in tsk.erlc_incnodes]) tsk.env.append_value('ERLC_DEFINES', self.to_list(getattr(self, 'defines', []))) tsk.env.append_value('ERLC_FLAGS', self.to_list(getattr(self, 'flags', []))) tsk.cwd = tsk.outputs[0].parent class erl(Task.Task): color = 'GREEN' run_str = '${ERLC} ${ERL_FLAGS} ${ERLC_INC_PATTERN:ERLC_INCPATHS} ${ERLC_DEF_PATTERN:ERLC_DEFINES} ${SRC}' def scan(task): node = task.inputs[0] deps = [] scanned = set([]) nodes_to_scan = [node] for n in nodes_to_scan: if n.abspath() in scanned: continue for i in re.findall(r'-include\("(.*)"\)\.', n.read()): for d in task.erlc_incnodes: r = d.find_node(i) if r: deps.append(r) nodes_to_scan.append(r) break scanned.add(n.abspath()) return (deps, []) @TaskGen.extension('.beam') def process(self, node): pass class erl_test(Task.Task): color = 'BLUE' run_str = '${ERL} ${ERL_FLAGS} ${ERL_TEST_FLAGS}' @feature('eunit') @after_method('process_source') def add_erl_test_run(self): test_modules = [t.outputs[0] for t in self.tasks] test_task = self.create_task('erl_test') test_task.set_inputs(self.source + test_modules) test_task.cwd = test_modules[0].parent test_task.env.append_value('ERL_FLAGS', self.to_list(getattr(self, 'flags', []))) test_list = ", ".join([m.change_ext("").path_from(test_task.cwd)+":test()" for m in test_modules]) test_flag = 'halt(case lists:all(fun(Elem) -> Elem == ok end, [%s]) of true -> 0; false -> 1 end).' 
% test_list test_task.env.append_value('ERL_TEST_FLAGS', EXEC_NON_INTERACTIVE) test_task.env.append_value('ERL_TEST_FLAGS', test_flag) class edoc(Task.Task): color = 'BLUE' run_str = "${ERL} ${ERL_FLAGS} ${ERL_DOC_FLAGS}" def keyword(self): return 'Generating edoc' @feature('edoc') @before_method('process_source') def add_edoc_task(self): # do not process source, it would create double erl->beam task self.meths.remove('process_source') e = self.path.find_resource(self.source) t = e.change_ext('.html') png = t.parent.make_node('erlang.png') css = t.parent.make_node('stylesheet.css') tsk = self.create_task('edoc', e, [t, png, css]) tsk.cwd = tsk.outputs[0].parent tsk.env.append_value('ERL_DOC_FLAGS', EXEC_NON_INTERACTIVE) tsk.env.append_value('ERL_DOC_FLAGS', 'edoc:files(["%s"]), halt(0).' % tsk.inputs[0].abspath()) # TODO the above can break if a file path contains '"' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/extras/fast_partial.py0000660000000000000000000003622700000000000023750 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2017-2018 (ita) """ A system for fast partial rebuilds Creating a large amount of task objects up front can take some time. By making a few assumptions, it is possible to avoid posting creating task objects for targets that are already up-to-date. On a silly benchmark the gain observed for 1M tasks can be 5m->10s for a single file change. Usage:: def options(opt): opt.load('fast_partial') Assumptions: * Start with a clean build (run "waf distclean" after enabling) * Mostly for C/C++/Fortran targets with link tasks (object-only targets are not handled) try it in the folder generated by utils/genbench.py * For full project builds: no --targets and no pruning from subfolders * The installation phase is ignored * `use=` dependencies are specified up front even across build groups * Task generator source files are not obtained from globs Implementation details: * The first layer obtains file timestamps to recalculate file hashes only when necessary (similar to md5_tstamp); the timestamps are then stored in a dedicated pickle file * A second layer associates each task generator to a file set to help detecting changes. Task generators are to create their tasks only when the related files have been modified. A specific db file is created to store such data (5m -> 1m10) * A third layer binds build context proxies onto task generators, replacing the default context. 
While loading data for the full build uses more memory (4GB -> 9GB), partial builds are then much faster (1m10 -> 13s) * A fourth layer enables a 2-level cache on file signatures to reduce the size of the main pickle file (13s -> 10s) """ import os from waflib import Build, Context, Errors, Logs, Task, TaskGen, Utils from waflib.TaskGen import feature, after_method, taskgen_method import waflib.Node DONE = 0 DIRTY = 1 NEEDED = 2 SKIPPABLE = ['cshlib', 'cxxshlib', 'cstlib', 'cxxstlib', 'cprogram', 'cxxprogram'] TSTAMP_DB = '.wafpickle_tstamp_db_file' SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() class bld_proxy(object): def __init__(self, bld): object.__setattr__(self, 'bld', bld) object.__setattr__(self, 'node_class', type('Nod3', (waflib.Node.Node,), {})) self.node_class.__module__ = 'waflib.Node' self.node_class.ctx = self object.__setattr__(self, 'root', self.node_class('', None)) for x in SAVED_ATTRS: if x != 'root': object.__setattr__(self, x, {}) self.fix_nodes() def __setattr__(self, name, value): bld = object.__getattribute__(self, 'bld') setattr(bld, name, value) def __delattr__(self, name): bld = object.__getattribute__(self, 'bld') delattr(bld, name) def __getattribute__(self, name): try: return object.__getattribute__(self, name) except AttributeError: bld = object.__getattribute__(self, 'bld') return getattr(bld, name) def __call__(self, *k, **kw): return self.bld(*k, **kw) def fix_nodes(self): for x in ('srcnode', 'path', 'bldnode'): node = self.root.find_dir(getattr(self.bld, x).abspath()) object.__setattr__(self, x, node) def set_key(self, store_key): object.__setattr__(self, 'store_key', store_key) def fix_tg_path(self, *tgs): # changing Node objects on task generators is possible # yet, all Node objects must belong to the same parent for tg in tgs: tg.path = self.root.make_node(tg.path.abspath()) def restore(self): dbfn = os.path.join(self.variant_dir, Context.DBFILE + self.store_key) Logs.debug('rev_use: reading %s', dbfn) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): # handle missing file/empty file Logs.debug('rev_use: Could not load the build cache %s (missing)', dbfn) else: try: waflib.Node.pickle_lock.acquire() waflib.Node.Nod3 = self.node_class try: data = Build.cPickle.loads(data) except Exception as e: Logs.debug('rev_use: Could not pickle the build cache %s: %r', dbfn, e) else: for x in SAVED_ATTRS: object.__setattr__(self, x, data.get(x, {})) finally: waflib.Node.pickle_lock.release() self.fix_nodes() def store(self): data = {} for x in Build.SAVED_ATTRS: data[x] = getattr(self, x) db = os.path.join(self.variant_dir, Context.DBFILE + self.store_key) with waflib.Node.pickle_lock: waflib.Node.Nod3 = self.node_class try: x = Build.cPickle.dumps(data, Build.PROTOCOL) except Build.cPickle.PicklingError: root = data['root'] for node_deps in data['node_deps'].values(): for idx, node in enumerate(node_deps): # there may be more cross-context Node objects to fix, # but this should be the main source node_deps[idx] = root.find_node(node.abspath()) x = Build.cPickle.dumps(data, Build.PROTOCOL) Logs.debug('rev_use: storing %s', db) Utils.writef(db + '.tmp', x, m='wb') try: st = os.stat(db) os.remove(db) if not Utils.is_win32: os.chown(db + '.tmp', st.st_uid, st.st_gid) except (AttributeError, OSError): pass os.rename(db + '.tmp', db) class bld(Build.BuildContext): def __init__(self, **kw): super(bld, self).__init__(**kw) self.hashes_md5_tstamp = {} def __call__(self, *k, **kw): # this is one way of doing it, one 
could use a task generator method too bld = kw['bld'] = bld_proxy(self) ret = TaskGen.task_gen(*k, **kw) self.task_gen_cache_names = {} self.add_to_group(ret, group=kw.get('group')) ret.bld = bld bld.set_key(ret.path.abspath().replace(os.sep, '') + str(ret.idx)) return ret def is_dirty(self): return True def store_tstamps(self): # Called after a build is finished # For each task generator, record all files involved in task objects # optimization: done only if there was something built do_store = False try: f_deps = self.f_deps except AttributeError: f_deps = self.f_deps = {} self.f_tstamps = {} allfiles = set() for g in self.groups: for tg in g: try: staleness = tg.staleness except AttributeError: staleness = DIRTY if staleness != DIRTY: # DONE case: there was nothing built # NEEDED case: the tg was brought in because of 'use' propagation # but nothing really changed for them, there may be incomplete # tasks (object files) and in this case it is best to let the next build # figure out if an input/output file changed continue do_cache = False for tsk in tg.tasks: if tsk.hasrun == Task.SUCCESS: do_cache = True pass elif tsk.hasrun == Task.SKIPPED: pass else: # one failed task, clear the cache for this tg try: del f_deps[(tg.path.abspath(), tg.idx)] except KeyError: pass else: # just store the new state because there is a change do_store = True # skip the rest because there is no valid cache possible break else: if not do_cache: # all skipped, but is there anything in cache? try: f_deps[(tg.path.abspath(), tg.idx)] except KeyError: # probably cleared because a wscript file changed # store it do_cache = True if do_cache: # there was a rebuild, store the data structure too tg.bld.store() # all tasks skipped but no cache # or a successful task build do_store = True st = set() for tsk in tg.tasks: st.update(tsk.inputs) st.update(self.node_deps.get(tsk.uid(), [])) # TODO do last/when loading the tgs? lst = [] for k in ('wscript', 'wscript_build'): n = tg.path.find_node(k) if n: n.get_bld_sig() lst.append(n.abspath()) lst.extend(sorted(x.abspath() for x in st)) allfiles.update(lst) f_deps[(tg.path.abspath(), tg.idx)] = lst for x in allfiles: # f_tstamps has everything, while md5_tstamp can be relatively empty on partial builds self.f_tstamps[x] = self.hashes_md5_tstamp[x][0] if do_store: dbfn = os.path.join(self.variant_dir, TSTAMP_DB) Logs.debug('rev_use: storing %s', dbfn) dbfn_tmp = dbfn + '.tmp' x = Build.cPickle.dumps([self.f_tstamps, f_deps], Build.PROTOCOL) Utils.writef(dbfn_tmp, x, m='wb') os.rename(dbfn_tmp, dbfn) Logs.debug('rev_use: stored %s', dbfn) def store(self): self.store_tstamps() if self.producer.dirty: Build.BuildContext.store(self) def compute_needed_tgs(self): # assume the 'use' keys are not modified during the build phase dbfn = os.path.join(self.variant_dir, TSTAMP_DB) Logs.debug('rev_use: Loading %s', dbfn) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): Logs.debug('rev_use: Could not load the build cache %s (missing)', dbfn) self.f_deps = {} self.f_tstamps = {} else: try: self.f_tstamps, self.f_deps = Build.cPickle.loads(data) except Exception as e: Logs.debug('rev_use: Could not pickle the build cache %s: %r', dbfn, e) self.f_deps = {} self.f_tstamps = {} else: Logs.debug('rev_use: Loaded %s', dbfn) # 1. obtain task generators that contain rebuilds # 2. 
obtain the 'use' graph and its dual stales = set() reverse_use_map = Utils.defaultdict(list) use_map = Utils.defaultdict(list) for g in self.groups: for tg in g: if tg.is_stale(): stales.add(tg) try: lst = tg.use = Utils.to_list(tg.use) except AttributeError: pass else: for x in lst: try: xtg = self.get_tgen_by_name(x) except Errors.WafError: pass else: use_map[tg].append(xtg) reverse_use_map[xtg].append(tg) Logs.debug('rev_use: found %r stale tgs', len(stales)) # 3. dfs to post downstream tg as stale visited = set() def mark_down(tg): if tg in visited: return visited.add(tg) Logs.debug('rev_use: marking down %r as stale', tg.name) tg.staleness = DIRTY for x in reverse_use_map[tg]: mark_down(x) for tg in stales: mark_down(tg) # 4. dfs to find ancestors tg to mark as needed self.needed_tgs = needed_tgs = set() def mark_needed(tg): if tg in needed_tgs: return needed_tgs.add(tg) if tg.staleness == DONE: Logs.debug('rev_use: marking up %r as needed', tg.name) tg.staleness = NEEDED for x in use_map[tg]: mark_needed(x) for xx in visited: mark_needed(xx) # so we have the whole tg trees to post in the set "needed" # load their build trees for tg in needed_tgs: tg.bld.restore() tg.bld.fix_tg_path(tg) # the stale ones should be fully build, while the needed ones # may skip a few tasks, see create_compiled_task and apply_link_after below Logs.debug('rev_use: amount of needed task gens: %r', len(needed_tgs)) def post_group(self): # assumption: we can ignore the folder/subfolders cuts def tgpost(tg): try: f = tg.post except AttributeError: pass else: f() if not self.targets or self.targets == '*': for tg in self.groups[self.current_group]: # this can cut quite a lot of tg objects if tg in self.needed_tgs: tgpost(tg) else: # default implementation return Build.BuildContext.post_group() def get_build_iterator(self): if not self.targets or self.targets == '*': self.compute_needed_tgs() return Build.BuildContext.get_build_iterator(self) @taskgen_method def is_stale(self): # assume no globs self.staleness = DIRTY # 1. the case of always stale targets if getattr(self, 'always_stale', False): return True # 2. check if the db file exists db = os.path.join(self.bld.variant_dir, Context.DBFILE) try: dbstat = os.stat(db).st_mtime except OSError: Logs.debug('rev_use: must post %r because this is a clean build') return True # 3.a check if the configuration exists cache_node = self.bld.bldnode.find_node('c4che/build.config.py') if not cache_node: return True # 3.b check if the configuration changed if os.stat(cache_node.abspath()).st_mtime > dbstat: Logs.debug('rev_use: must post %r because the configuration has changed', self.name) return True # 3.c any tstamp data? try: f_deps = self.bld.f_deps except AttributeError: Logs.debug('rev_use: must post %r because there is no f_deps', self.name) return True # 4. check if this is the first build (no cache) try: lst = f_deps[(self.path.abspath(), self.idx)] except KeyError: Logs.debug('rev_use: must post %r because there it has no cached data', self.name) return True try: cache = self.bld.cache_tstamp_rev_use except AttributeError: cache = self.bld.cache_tstamp_rev_use = {} # 5. 
check the timestamp of each dependency files listed is unchanged f_tstamps = self.bld.f_tstamps for x in lst: try: old_ts = f_tstamps[x] except KeyError: Logs.debug('rev_use: must post %r because %r is not in cache', self.name, x) return True try: try: ts = cache[x] except KeyError: ts = cache[x] = os.stat(x).st_mtime except OSError: del f_deps[(self.path.abspath(), self.idx)] Logs.debug('rev_use: must post %r because %r does not exist anymore', self.name, x) return True else: if ts != old_ts: Logs.debug('rev_use: must post %r because the timestamp on %r changed %r %r', self.name, x, old_ts, ts) return True self.staleness = DONE return False @taskgen_method def create_compiled_task(self, name, node): # skip the creation of object files # assumption: object-only targets are not skippable if self.staleness == NEEDED: # only libraries/programs can skip object files for x in SKIPPABLE: if x in self.features: return None out = '%s.%d.o' % (node.name, self.idx) task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @feature(*SKIPPABLE) @after_method('apply_link') def apply_link_after(self): # cprogram/cxxprogram might be unnecessary if self.staleness != NEEDED: return for tsk in self.tasks: tsk.hasrun = Task.SKIPPED def path_from(self, node): # handle nodes of distinct types if node.ctx is not self.ctx: node = self.ctx.root.make_node(node.abspath()) return self.default_path_from(node) waflib.Node.Node.default_path_from = waflib.Node.Node.path_from waflib.Node.Node.path_from = path_from def h_file(self): # similar to md5_tstamp.py, but with 2-layer cache # global_cache for the build context common for all task generators # local_cache for the build context proxy (one by task generator) # # the global cache is not persistent # the local cache is persistent and meant for partial builds # # assume all calls are made from a single thread # filename = self.abspath() st = os.stat(filename) global_cache = self.ctx.bld.hashes_md5_tstamp local_cache = self.ctx.hashes_md5_tstamp if filename in global_cache: # value already calculated in this build cval = global_cache[filename] # the value in global cache is assumed to be calculated once # reverifying it could cause task generators # to get distinct tstamp values, thus missing rebuilds local_cache[filename] = cval return cval[1] if filename in local_cache: cval = local_cache[filename] if cval[0] == st.st_mtime: # correct value from a previous build # put it in the global cache global_cache[filename] = cval return cval[1] ret = Utils.h_file(filename) local_cache[filename] = global_cache[filename] = (st.st_mtime, ret) return ret waflib.Node.Node.h_file = h_file ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_bgxlf.py0000660000000000000000000000132600000000000023041 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de from waflib.Tools import fc, fc_config, fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].insert(0, 'fc_bgxlf') @conf def find_bgxlf(conf): fc = conf.find_program(['bgxlf2003_r','bgxlf2003'], var='FC') conf.get_xlf_version(fc) conf.env.FC_NAME = 'BGXLF' @conf def bg_flags(self): self.env.SONAME_ST = '' self.env.FCSHLIB_MARKER = '' self.env.FCSTLIB_MARKER = '' self.env.FCFLAGS_fcshlib = ['-fPIC'] self.env.LINKFLAGS_fcshlib = ['-G', '-Wl,-bexpfull'] def configure(conf): conf.find_bgxlf() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.xlf_flags() conf.bg_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_cray.py0000660000000000000000000000264600000000000022703 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib.Tools import fc, fc_config, fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_cray') @conf def find_crayftn(conf): """Find the Cray fortran compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['crayftn'], var='FC') conf.get_crayftn_version(fc) conf.env.FC_NAME = 'CRAY' conf.env.FC_MOD_CAPITALIZATION = 'UPPER.mod' @conf def crayftn_flags(conf): v = conf.env v['_FCMODOUTFLAGS'] = ['-em', '-J.'] # enable module files and put them in the current directory v['FCFLAGS_DEBUG'] = ['-m1'] # more verbose compiler warnings v['FCFLAGS_fcshlib'] = ['-h pic'] v['LINKFLAGS_fcshlib'] = ['-h shared'] v['FCSTLIB_MARKER'] = '-h static' v['FCSHLIB_MARKER'] = '-h dynamic' @conf def get_crayftn_version(conf, fc): version_re = re.compile(r"Cray Fortran\s*:\s*Version\s*(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-V'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the Cray Fortran compiler version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_crayftn() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.crayftn_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_nag.py0000660000000000000000000000276000000000000022507 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].insert(0, 'fc_nag') @conf def find_nag(conf): """Find the NAG Fortran Compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['nagfor'], var='FC') conf.get_nag_version(fc) conf.env.FC_NAME = 'NAG' conf.env.FC_MOD_CAPITALIZATION = 'lower' @conf def nag_flags(conf): v = conf.env v.FCFLAGS_DEBUG = ['-C=all'] v.FCLNK_TGT_F = ['-o', ''] v.FC_TGT_F = ['-c', '-o', ''] @conf def nag_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() nag_modifier_func = getattr(conf, 'nag_modifier_' + dest_os, None) if nag_modifier_func: nag_modifier_func() @conf def get_nag_version(conf, fc): """Get the NAG compiler version""" version_re = re.compile(r"^NAG Fortran Compiler *Release *(?P\d*)\.(?P\d*)", re.M).search cmd = fc + ['-V'] out, err = fc_config.getoutput(conf,cmd,stdin=False) if out: match = version_re(out) if not match: match = version_re(err) else: match = version_re(err) if not match: conf.fatal('Could not determine the NAG version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_nag() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.nag_flags() conf.nag_modifier_platform() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_nec.py0000660000000000000000000000317300000000000022506 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib.Tools import fc, fc_config, fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_nec') @conf def find_sxfc(conf): """Find the NEC fortran compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['sxf90','sxf03'], var='FC') conf.get_sxfc_version(fc) conf.env.FC_NAME = 'NEC' conf.env.FC_MOD_CAPITALIZATION = 'lower' @conf def sxfc_flags(conf): v = conf.env v['_FCMODOUTFLAGS'] = [] # enable module files and put them in the current directory v['FCFLAGS_DEBUG'] = [] # more verbose compiler warnings v['FCFLAGS_fcshlib'] = [] v['LINKFLAGS_fcshlib'] = [] v['FCSTLIB_MARKER'] = '' v['FCSHLIB_MARKER'] = '' @conf def get_sxfc_version(conf, fc): version_re = re.compile(r"FORTRAN90/SX\s*Version\s*(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-V'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: version_re=re.compile(r"NEC Fortran 2003 Compiler for\s*(?P\S*)\s*\(c\)\s*(?P\d*)",re.I).search if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the NEC Fortran compiler version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_sxfc() conf.find_program('sxar',var='AR') conf.add_os_flags('ARFLAGS') if not conf.env.ARFLAGS: conf.env.ARFLAGS=['rcs'] conf.fc_flags() conf.fc_add_flags() conf.sxfc_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_nfort.py0000660000000000000000000000243000000000000023064 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Detection of the NEC Fortran compiler for Aurora Tsubasa import re from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_nfort') @conf def find_nfort(conf): fc=conf.find_program(['nfort'],var='FC') conf.get_nfort_version(fc) conf.env.FC_NAME='NFORT' conf.env.FC_MOD_CAPITALIZATION='lower' @conf def nfort_flags(conf): v=conf.env v['_FCMODOUTFLAGS']=[] v['FCFLAGS_DEBUG']=[] v['FCFLAGS_fcshlib']=[] v['LINKFLAGS_fcshlib']=[] v['FCSTLIB_MARKER']='' v['FCSHLIB_MARKER']='' @conf def get_nfort_version(conf,fc): version_re=re.compile(r"nfort\s*\(NFORT\)\s*(?P\d+)\.(?P\d+)\.",re.I).search cmd=fc+['--version'] out,err=fc_config.getoutput(conf,cmd,stdin=False) if out: match=version_re(out) else: match=version_re(err) if not match: return(False) conf.fatal('Could not determine the NEC NFORT Fortran compiler version.') else: k=match.groupdict() conf.env['FC_VERSION']=(k['major'],k['minor']) def configure(conf): conf.find_nfort() conf.find_program('nar',var='AR') conf.add_os_flags('ARFLAGS') if not conf.env.ARFLAGS: conf.env.ARFLAGS=['rcs'] conf.fc_flags() conf.fc_add_flags() conf.nfort_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_open64.py0000660000000000000000000000274600000000000023061 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].insert(0, 'fc_open64') @conf def find_openf95(conf): """Find the Open64 Fortran Compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['openf95', 'openf90'], var='FC') conf.get_open64_version(fc) conf.env.FC_NAME = 'OPEN64' conf.env.FC_MOD_CAPITALIZATION = 'UPPER.mod' @conf def openf95_flags(conf): v = conf.env v['FCFLAGS_DEBUG'] = ['-fullwarn'] @conf def openf95_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() openf95_modifier_func = getattr(conf, 'openf95_modifier_' + dest_os, None) if openf95_modifier_func: openf95_modifier_func() @conf def get_open64_version(conf, fc): """Get the Open64 compiler version""" version_re = re.compile(r"Open64 Compiler Suite: *Version *(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-version'] out, err = fc_config.getoutput(conf,cmd,stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the Open64 version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_openf95() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.openf95_flags() conf.openf95_modifier_platform() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_pgfortran.py0000660000000000000000000000336400000000000023745 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib.Tools import fc, fc_config, fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_pgfortran') @conf def find_pgfortran(conf): """Find the PGI fortran compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['pgfortran', 'pgf95', 'pgf90'], var='FC') conf.get_pgfortran_version(fc) conf.env.FC_NAME = 'PGFC' @conf def pgfortran_flags(conf): v = conf.env v['FCFLAGS_fcshlib'] = ['-shared'] v['FCFLAGS_DEBUG'] = ['-Minform=inform', '-Mstandard'] # why not v['FCSTLIB_MARKER'] = '-Bstatic' v['FCSHLIB_MARKER'] = '-Bdynamic' v['SONAME_ST'] = '-soname %s' @conf def get_pgfortran_version(conf,fc): version_re = re.compile(r"The Portland Group", re.I).search cmd = fc + ['-V'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not verify PGI signature') cmd = fc + ['-help=variable'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out.find('COMPVER')<0: conf.fatal('Could not determine the compiler type') k = {} prevk = '' out = out.splitlines() for line in out: lst = line.partition('=') if lst[1] == '=': key = lst[0].rstrip() if key == '': key = prevk val = lst[2].rstrip() k[key] = val else: prevk = line.partition(' ')[0] def isD(var): return var in k def isT(var): return var in k and k[var]!='0' conf.env['FC_VERSION'] = (k['COMPVER'].split('.')) def configure(conf): conf.find_pgfortran() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.pgfortran_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_solstudio.py0000660000000000000000000000315200000000000023763 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_solstudio') @conf def find_solstudio(conf): """Find the Solaris Studio compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['sunf95', 'f95', 'sunf90', 'f90'], var='FC') conf.get_solstudio_version(fc) conf.env.FC_NAME = 'SOL' @conf def solstudio_flags(conf): v = conf.env v['FCFLAGS_fcshlib'] = ['-Kpic'] v['FCFLAGS_DEBUG'] = ['-w3'] v['LINKFLAGS_fcshlib'] = ['-G'] v['FCSTLIB_MARKER'] = '-Bstatic' v['FCSHLIB_MARKER'] = '-Bdynamic' v['SONAME_ST'] = '-h %s' @conf def solstudio_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() solstudio_modifier_func = getattr(conf, 'solstudio_modifier_' + dest_os, None) if solstudio_modifier_func: solstudio_modifier_func() @conf def get_solstudio_version(conf, fc): """Get the compiler version""" version_re = re.compile(r"Sun Fortran 95 *(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-V'] out, err = fc_config.getoutput(conf,cmd,stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the Sun Studio Fortran version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_solstudio() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.solstudio_flags() conf.solstudio_modifier_platform() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fc_xlf.py0000660000000000000000000000311700000000000022530 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils,Errors from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['aix'].insert(0, 'fc_xlf') @conf def find_xlf(conf): """Find the xlf program (will look in the environment variable 'FC')""" fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC') conf.get_xlf_version(fc) conf.env.FC_NAME='XLF' @conf def xlf_flags(conf): v = conf.env v['FCDEFINES_ST'] = '-WF,-D%s' v['FCFLAGS_fcshlib'] = ['-qpic=small'] v['FCFLAGS_DEBUG'] = ['-qhalt=w'] v['LINKFLAGS_fcshlib'] = ['-Wl,-shared'] @conf def xlf_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None) if xlf_modifier_func: xlf_modifier_func() @conf def get_xlf_version(conf, fc): """Get the compiler version""" cmd = fc + ['-qversion'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find xlf %r' % cmd) for v in (r"IBM XL Fortran.* V(?P\d*)\.(?P\d*)",): version_re = re.compile(v, re.I).search match = version_re(out or err) if match: k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) break else: conf.fatal('Could not determine the XLF version.') def configure(conf): conf.find_xlf() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.xlf_flags() conf.xlf_modifier_platform() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/file_to_object.py0000660000000000000000000000663100000000000024242 0ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Tool to embed file into objects __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ This tool allows to embed file contents in object files (.o). It is not exactly portable, and the file contents are reachable using various non-portable fashions. The goal here is to provide a functional interface to the embedding of file data in objects. See the ``playground/embedded_resources`` example for an example. Usage:: bld( name='pipeline', # ^ Reference this in use="..." for things using the generated code features='file_to_object', source='some.file', # ^ Name of the file to embed in binary section. ) Known issues: - Destination is named like source, with extension renamed to .o eg. 
some.file -> some.o """ import os, sys from waflib import Task, TaskGen, Errors def filename_c_escape(x): return x.replace("\\", "\\\\") class file_to_object_s(Task.Task): color = 'CYAN' vars = ['DEST_CPU', 'DEST_BINFMT'] def run(self): name = [] for i, x in enumerate(self.inputs[0].name): if x.isalnum(): name.append(x) else: name.append('_') file = self.inputs[0].abspath() size = os.path.getsize(file) if self.env.DEST_CPU in ('x86_64', 'ia', 'aarch64'): unit = 'quad' align = 8 elif self.env.DEST_CPU in ('x86','arm', 'thumb', 'm68k'): unit = 'long' align = 4 else: raise Errors.WafError("Unsupported DEST_CPU, please report bug!") file = filename_c_escape(file) name = "_binary_" + "".join(name) rodata = ".section .rodata" if self.env.DEST_BINFMT == "mac-o": name = "_" + name rodata = ".section __TEXT,__const" with open(self.outputs[0].abspath(), 'w') as f: f.write(\ """ .global %(name)s_start .global %(name)s_end .global %(name)s_size %(rodata)s %(name)s_start: .incbin "%(file)s" %(name)s_end: .align %(align)d %(name)s_size: .%(unit)s 0x%(size)x """ % locals()) class file_to_object_c(Task.Task): color = 'CYAN' def run(self): name = [] for i, x in enumerate(self.inputs[0].name): if x.isalnum(): name.append(x) else: name.append('_') file = self.inputs[0].abspath() size = os.path.getsize(file) name = "_binary_" + "".join(name) def char_to_num(ch): if sys.version_info[0] < 3: return ord(ch) return ch data = self.inputs[0].read('rb') lines, line = [], [] for idx_byte, byte in enumerate(data): line.append(byte) if len(line) > 15 or idx_byte == size-1: lines.append(", ".join(("0x%02x" % char_to_num(x)) for x in line)) line = [] data = ",\n ".join(lines) self.outputs[0].write(\ """ unsigned long %(name)s_size = %(size)dL; char const %(name)s_start[] = { %(data)s }; char const %(name)s_end[] = {}; """ % locals()) @TaskGen.feature('file_to_object') @TaskGen.before_method('process_source') def tg_file_to_object(self): bld = self.bld sources = self.to_nodes(self.source) targets = [] for src in sources: if bld.env.F2O_METHOD == ["asm"]: tgt = src.parent.find_or_declare(src.name + '.f2o.s') tsk = self.create_task('file_to_object_s', src, tgt) tsk.cwd = src.parent.abspath() # verify else: tgt = src.parent.find_or_declare(src.name + '.f2o.c') tsk = self.create_task('file_to_object_c', src, tgt) tsk.cwd = src.parent.abspath() # verify targets.append(tgt) self.source = targets def configure(conf): conf.load('gas') conf.env.F2O_METHOD = ["c"] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fluid.py0000660000000000000000000000153600000000000022375 0ustar00rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # Grygoriy Fuchedzhy 2009 """ Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature. 
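A minimal wscript sketch (the file and target names are only illustrative,
not taken from this tool)::

	def configure(conf):
		conf.load('compiler_cxx fluid')

	def build(bld):
		bld.program(
			features = 'cxx fluid',
			source   = 'ui.fl main.cpp',
			target   = 'app',
			use      = 'FLTK')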
""" from waflib import Task from waflib.TaskGen import extension class fluid(Task.Task): color = 'BLUE' ext_out = ['.h'] run_str = '${FLUID} -c -o ${TGT[0].abspath()} -h ${TGT[1].abspath()} ${SRC}' @extension('.fl') def process_fluid(self, node): """add the .fl to the source list; the cxx file generated will be compiled when possible""" cpp = node.change_ext('.cpp') hpp = node.change_ext('.hpp') self.create_task('fluid', node, [cpp, hpp]) if 'cxx' in self.features: self.source.append(cpp) def configure(conf): conf.find_program('fluid', var='FLUID') conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/freeimage.py0000660000000000000000000000410500000000000023211 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # # written by Sylvain Rouquette, 2011 ''' To add the freeimage tool to the waf file: $ ./waf-light --tools=compat15,freeimage or, if you have waf >= 1.6.2 $ ./waf update --files=freeimage The wscript will look like: def options(opt): opt.load('compiler_cxx freeimage') def configure(conf): conf.load('compiler_cxx freeimage') # you can call check_freeimage with some parameters. # It's optional on Linux, it's 'mandatory' on Windows if # you didn't use --fi-path on the command-line # conf.check_freeimage(path='FreeImage/Dist', fip=True) def build(bld): bld(source='main.cpp', target='app', use='FREEIMAGE') ''' from waflib import Utils from waflib.Configure import conf def options(opt): opt.add_option('--fi-path', type='string', default='', dest='fi_path', help='''path to the FreeImage directory \ where the files are e.g. /FreeImage/Dist''') opt.add_option('--fip', action='store_true', default=False, dest='fip', help='link with FreeImagePlus') opt.add_option('--fi-static', action='store_true', default=False, dest='fi_static', help="link as shared libraries") @conf def check_freeimage(self, path=None, fip=False): self.start_msg('Checking FreeImage') if not self.env['CXX']: self.fatal('you must load compiler_cxx before loading freeimage') prefix = self.options.fi_static and 'ST' or '' platform = Utils.unversioned_sys_platform() if platform == 'win32': if not path: self.fatal('you must specify the path to FreeImage. \ use --fi-path=/FreeImage/Dist') else: self.env['INCLUDES_FREEIMAGE'] = path self.env['%sLIBPATH_FREEIMAGE' % prefix] = path libs = ['FreeImage'] if self.options.fip: libs.append('FreeImagePlus') if platform == 'win32': self.env['%sLIB_FREEIMAGE' % prefix] = libs else: self.env['%sLIB_FREEIMAGE' % prefix] = [i.lower() for i in libs] self.end_msg('ok') def configure(conf): platform = Utils.unversioned_sys_platform() if platform == 'win32' and not conf.options.fi_path: return conf.check_freeimage(conf.options.fi_path, conf.options.fip) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fsb.py0000660000000000000000000000107400000000000022041 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Fully sequential builds The previous tasks from task generators are re-processed, and this may lead to speed issues Yet, if you are using this, speed is probably a minor concern """ from waflib import Build def options(opt): pass def configure(conf): pass class FSBContext(Build.BuildContext): def __call__(self, *k, **kw): ret = Build.BuildContext.__call__(self, *k, **kw) # evaluate the results immediately Build.BuildContext.compile(self) return ret def compile(self): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/fsc.py0000660000000000000000000000356500000000000022051 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Experimental F# stuff FSC="mono /path/to/fsc.exe" waf configure build """ from waflib import Utils, Task from waflib.TaskGen import before_method, after_method, feature from waflib.Tools import ccroot, cs ccroot.USELIB_VARS['fsc'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) @feature('fs') @before_method('process_source') def apply_fsc(self): cs_nodes = [] no_nodes = [] for x in self.to_nodes(self.source): if x.name.endswith('.fs'): cs_nodes.append(x) else: no_nodes.append(x) self.source = no_nodes bintype = getattr(self, 'type', self.gen.endswith('.dll') and 'library' or 'exe') self.cs_task = tsk = self.create_task('fsc', cs_nodes, self.path.find_or_declare(self.gen)) tsk.env.CSTYPE = '/target:%s' % bintype tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') if inst_to: # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) feature('fs')(cs.use_cs) after_method('apply_fsc')(cs.use_cs) feature('fs')(cs.debug_cs) after_method('apply_fsc', 'use_cs')(cs.debug_cs) class fsc(Task.Task): """ Compile F# files """ color = 'YELLOW' run_str = '${FSC} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' def configure(conf): """ Find a F# compiler, set the variable FSC for the compiler and FS_NAME (mono or fsc) """ conf.find_program(['fsc.exe', 'fsharpc'], var='FSC') conf.env.ASS_ST = '/r:%s' conf.env.RES_ST = '/resource:%s' conf.env.FS_NAME = 'fsc' if str(conf.env.FSC).lower().find('fsharpc') > -1: conf.env.FS_NAME = 'mono' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/gccdeps.py0000660000000000000000000001560200000000000022701 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2010 (ita) """ Execute the tasks with gcc -MD, read the dependencies from the .d file and prepare the dependency calculation for the next run. This affects the cxx class, so make sure to load Qt5 after this tool. 
Usage:: def options(opt): opt.load('compiler_cxx') def configure(conf): conf.load('compiler_cxx gccdeps') """ import os, re, threading from waflib import Task, Logs, Utils, Errors from waflib.Tools import c_preproc from waflib.TaskGen import before_method, feature lock = threading.Lock() gccdeps_flags = ['-MD'] if not c_preproc.go_absolute: gccdeps_flags = ['-MMD'] # Third-party tools are allowed to add extra names in here with append() supported_compilers = ['gas', 'gcc', 'icc', 'clang'] def scan(self): if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: return super(self.derived_gccdeps, self).scan() nodes = self.generator.bld.node_deps.get(self.uid(), []) names = [] return (nodes, names) re_o = re.compile(r"\.o$") re_splitter = re.compile(r'(?= 0: return line[sep_idx + 2:] else: return line def path_to_node(base_node, path, cached_nodes): # Take the base node and the path and return a node # Results are cached because searching the node tree is expensive # The following code is executed by threads, it is not safe, so a lock is needed... if getattr(path, '__hash__'): node_lookup_key = (base_node, path) else: # Not hashable, assume it is a list and join into a string node_lookup_key = (base_node, os.path.sep.join(path)) try: lock.acquire() node = cached_nodes[node_lookup_key] except KeyError: node = base_node.find_resource(path) cached_nodes[node_lookup_key] = node finally: lock.release() return node def post_run(self): if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: return super(self.derived_gccdeps, self).post_run() name = self.outputs[0].abspath() name = re_o.sub('.d', name) try: txt = Utils.readf(name) except EnvironmentError: Logs.error('Could not find a .d dependency file, are cflags/cxxflags overwritten?') raise #os.remove(name) # Compilers have the choice to either output the file's dependencies # as one large Makefile rule: # # /path/to/file.o: /path/to/dep1.h \ # /path/to/dep2.h \ # /path/to/dep3.h \ # ... # # or as many individual rules: # # /path/to/file.o: /path/to/dep1.h # /path/to/file.o: /path/to/dep2.h # /path/to/file.o: /path/to/dep3.h # ... # # So the first step is to sanitize the input by stripping out the left- # hand side of all these lines. After that, whatever remains are the # implicit dependencies of task.outputs[0] txt = '\n'.join([remove_makefile_rule_lhs(line) for line in txt.splitlines()]) # Now join all the lines together txt = txt.replace('\\\n', '') val = txt.strip() val = [x.replace('\\ ', ' ') for x in re_splitter.split(val) if x] nodes = [] bld = self.generator.bld # Dynamically bind to the cache try: cached_nodes = bld.cached_nodes except AttributeError: cached_nodes = bld.cached_nodes = {} for x in val: node = None if os.path.isabs(x): node = path_to_node(bld.root, x, cached_nodes) else: # TODO waf 1.9 - single cwd value path = getattr(bld, 'cwdx', bld.bldnode) # when calling find_resource, make sure the path does not contain '..' x = [k for k in Utils.split_path(x) if k and k != '.'] while '..' 
in x: idx = x.index('..') if idx == 0: x = x[1:] path = path.parent else: del x[idx] del x[idx-1] node = path_to_node(path, x, cached_nodes) if not node: raise ValueError('could not find %r for %r' % (x, self)) if id(node) == id(self.inputs[0]): # ignore the source file, it is already in the dependencies # this way, successful config tests may be retrieved from the cache continue nodes.append(node) Logs.debug('deps: gccdeps for %s returned %s', self, nodes) bld.node_deps[self.uid()] = nodes bld.raw_deps[self.uid()] = [] try: del self.cache_sig except AttributeError: pass Task.Task.post_run(self) def sig_implicit_deps(self): if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: return super(self.derived_gccdeps, self).sig_implicit_deps() bld = self.generator.bld try: return self.compute_sig_implicit_deps() except Errors.TaskNotReady: raise ValueError("Please specify the build order precisely with gccdeps (asm/c/c++ tasks)") except EnvironmentError: # If a file is renamed, assume the dependencies are stale and must be recalculated for x in bld.node_deps.get(self.uid(), []): if not x.is_bld() and not x.exists(): try: del x.parent.children[x.name] except KeyError: pass key = self.uid() bld.node_deps[key] = [] bld.raw_deps[key] = [] return Utils.SIG_NIL def wrap_compiled_task(classname): derived_class = type(classname, (Task.classes[classname],), {}) derived_class.derived_gccdeps = derived_class derived_class.post_run = post_run derived_class.scan = scan derived_class.sig_implicit_deps = sig_implicit_deps for k in ('asm', 'c', 'cxx'): if k in Task.classes: wrap_compiled_task(k) @before_method('process_source') @feature('force_gccdeps') def force_gccdeps(self): self.env.ENABLE_GCCDEPS = ['asm', 'c', 'cxx'] def configure(conf): # in case someone provides a --enable-gccdeps command-line option if not getattr(conf.options, 'enable_gccdeps', True): return global gccdeps_flags flags = conf.env.GCCDEPS_FLAGS or gccdeps_flags if conf.env.ASM_NAME in supported_compilers: try: conf.check(fragment='', features='asm force_gccdeps', asflags=flags, compile_filename='test.S', msg='Checking for asm flags %r' % ''.join(flags)) except Errors.ConfigurationError: pass else: conf.env.append_value('ASFLAGS', flags) conf.env.append_unique('ENABLE_GCCDEPS', 'asm') if conf.env.CC_NAME in supported_compilers: try: conf.check(fragment='int main() { return 0; }', features='c force_gccdeps', cflags=flags, msg='Checking for c flags %r' % ''.join(flags)) except Errors.ConfigurationError: pass else: conf.env.append_value('CFLAGS', flags) conf.env.append_unique('ENABLE_GCCDEPS', 'c') if conf.env.CXX_NAME in supported_compilers: try: conf.check(fragment='int main() { return 0; }', features='cxx force_gccdeps', cxxflags=flags, msg='Checking for cxx flags %r' % ''.join(flags)) except Errors.ConfigurationError: pass else: conf.env.append_value('CXXFLAGS', flags) conf.env.append_unique('ENABLE_GCCDEPS', 'cxx') def options(opt): raise ValueError('Do not load gccdeps options') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/gdbus.py0000660000000000000000000000553200000000000022376 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Copyright Garmin International or its subsidiaries, 2018 # # Heavily based on dbus.py """ Compiles dbus files with **gdbus-codegen** Typical usage:: def options(opt): opt.load('compiler_c gdbus') def configure(conf): conf.load('compiler_c gdbus') def build(bld): tg = 
bld.program(
			includes = '.',
			source = bld.path.ant_glob('*.c'),
			target = 'gnome-hello')
		tg.add_gdbus_file('test.xml', 'com.example.example.', 'Example')
"""

from waflib import Task, Errors, Utils
from waflib.TaskGen import taskgen_method, before_method

@taskgen_method
def add_gdbus_file(self, filename, prefix, namespace, export=False):
	"""
	Adds a dbus file to the list of dbus files to process. Store them in the attribute *gdbus_lst*.

	:param filename: xml file to compile
	:type filename: string
	:param prefix: interface prefix (--interface-prefix=prefix)
	:type prefix: string
	:param namespace: C namespace (--c-namespace=namespace)
	:type namespace: string
	:param export: Export Headers?
	:type export: boolean
	"""
	if not hasattr(self, 'gdbus_lst'):
		self.gdbus_lst = []
	if not 'process_gdbus' in self.meths:
		self.meths.append('process_gdbus')
	self.gdbus_lst.append([filename, prefix, namespace, export])

@before_method('process_source')
def process_gdbus(self):
	"""
	Processes the dbus files stored in the attribute *gdbus_lst* to create
	:py:class:`gdbus_binding_tool` instances.
	"""
	output_node = self.path.get_bld().make_node(['gdbus', self.get_name()])
	sources = []

	for filename, prefix, namespace, export in getattr(self, 'gdbus_lst', []):
		node = self.path.find_resource(filename)
		if not node:
			raise Errors.WafError('file not found ' + filename)
		c_file = output_node.find_or_declare(node.change_ext('.c').name)
		h_file = output_node.find_or_declare(node.change_ext('.h').name)
		tsk = self.create_task('gdbus_binding_tool', node, [c_file, h_file])
		tsk.cwd = output_node.abspath()
		tsk.env.GDBUS_CODEGEN_INTERFACE_PREFIX = prefix
		tsk.env.GDBUS_CODEGEN_NAMESPACE = namespace
		tsk.env.GDBUS_CODEGEN_OUTPUT = node.change_ext('').name
		sources.append(c_file)

	if sources:
		output_node.mkdir()
		self.source = Utils.to_list(self.source) + sources
		self.includes = [output_node] + self.to_incnodes(getattr(self, 'includes', []))
		if export:
			self.export_includes = [output_node] + self.to_incnodes(getattr(self, 'export_includes', []))

class gdbus_binding_tool(Task.Task):
	"""
	Compiles a dbus file
	"""
	color = 'BLUE'
	ext_out = ['.h', '.c']
	run_str = '${GDBUS_CODEGEN} --interface-prefix ${GDBUS_CODEGEN_INTERFACE_PREFIX} --generate-c-code ${GDBUS_CODEGEN_OUTPUT} --c-namespace ${GDBUS_CODEGEN_NAMESPACE} --c-generate-object-manager ${SRC[0].abspath()}'
	shell = True

def configure(conf):
	"""
	Detects the program gdbus-codegen and sets ``conf.env.GDBUS_CODEGEN``
	"""
	conf.find_program('gdbus-codegen', var='GDBUS_CODEGEN')
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/extras/genpybind.py0000660000000000000000000001553000000000000023250 0ustar00rootroot00000000000000import os
import pipes
import subprocess
import sys

from waflib import Logs, Task, Context
from waflib.Tools.c_preproc import scan as scan_impl
# ^-- Note: waflib.extras.gccdeps.scan does not work for us,
# due to its current implementation:
# The -MD flag is injected into the {C,CXX}FLAGS environment variable and
# dependencies are read out in a separate step after compiling by reading
# the .d file saved alongside the object file.
# As the genpybind task refers to a header file that is never compiled itself,
# gccdeps will not be able to extract the list of dependencies.
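#
# A minimal usage sketch (hypothetical wscript; 'mymodule' and the exact feature
# combination are placeholders to adapt per project). The 'genpybind' feature
# generates binding code from the listed header and compiles that instead:
#
#     def configure(cfg):
#         cfg.load('genpybind')   # also loads compiler_cxx and python, see configure() below
#
#     def build(bld):
#         bld(features='genpybind cxx cxxshlib pyext',
#             source='mymodule.h',            # header analysed by genpybind
#             target='mymodule',              # module name defaults to the target name
#             genpybind_tags='mymodule')      # optional, selects tagged declarations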
from waflib.TaskGen import feature, before_method def join_args(args): return " ".join(pipes.quote(arg) for arg in args) def configure(cfg): cfg.load("compiler_cxx") cfg.load("python") cfg.check_python_version(minver=(2, 7)) if not cfg.env.LLVM_CONFIG: cfg.find_program("llvm-config", var="LLVM_CONFIG") if not cfg.env.GENPYBIND: cfg.find_program("genpybind", var="GENPYBIND") # find clang reasource dir for builtin headers cfg.env.GENPYBIND_RESOURCE_DIR = os.path.join( cfg.cmd_and_log(cfg.env.LLVM_CONFIG + ["--libdir"]).strip(), "clang", cfg.cmd_and_log(cfg.env.LLVM_CONFIG + ["--version"]).strip()) if os.path.exists(cfg.env.GENPYBIND_RESOURCE_DIR): cfg.msg("Checking clang resource dir", cfg.env.GENPYBIND_RESOURCE_DIR) else: cfg.fatal("Clang resource dir not found") @feature("genpybind") @before_method("process_source") def generate_genpybind_source(self): """ Run genpybind on the headers provided in `source` and compile/link the generated code instead. This works by generating the code on the fly and swapping the source node before `process_source` is run. """ # name of module defaults to name of target module = getattr(self, "module", self.target) # create temporary source file in build directory to hold generated code out = "genpybind-%s.%d.cpp" % (module, self.idx) out = self.path.get_bld().find_or_declare(out) task = self.create_task("genpybind", self.to_nodes(self.source), out) # used to detect whether CFLAGS or CXXFLAGS should be passed to genpybind task.features = self.features task.module = module # can be used to select definitions to include in the current module # (when header files are shared by more than one module) task.genpybind_tags = self.to_list(getattr(self, "genpybind_tags", [])) # additional include directories task.includes = self.to_list(getattr(self, "includes", [])) task.genpybind = self.env.GENPYBIND # Tell waf to compile/link the generated code instead of the headers # originally passed-in via the `source` parameter. (see `process_source`) self.source = [out] class genpybind(Task.Task): # pylint: disable=invalid-name """ Runs genpybind on headers provided as input to this task. Generated code will be written to the first (and only) output node. 
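	For reference, the first line of the generated file records the exact genpybind
	command line as a ``//`` comment (see ``run`` below), along the lines of
	(hypothetical paths)::

		// /usr/bin/genpybind --genpybind-module mymodule -- /path/to/mymodule.h -- -D__GENPYBIND__ -xc++ ...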
""" quiet = True color = "PINK" scan = scan_impl @staticmethod def keyword(): return "Analyzing" def run(self): if not self.inputs: return args = self.find_genpybind() + self._arguments( resource_dir=self.env.GENPYBIND_RESOURCE_DIR) output = self.run_genpybind(args) # For debugging / log output pasteable_command = join_args(args) # write generated code to file in build directory # (will be compiled during process_source stage) (output_node,) = self.outputs output_node.write("// {}\n{}\n".format( pasteable_command.replace("\n", "\n// "), output)) def find_genpybind(self): return self.genpybind def run_genpybind(self, args): bld = self.generator.bld kwargs = dict(cwd=bld.variant_dir) if hasattr(bld, "log_command"): bld.log_command(args, kwargs) else: Logs.debug("runner: {!r}".format(args)) proc = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) stdout, stderr = proc.communicate() if not isinstance(stdout, str): stdout = stdout.decode(sys.stdout.encoding, errors="replace") if not isinstance(stderr, str): stderr = stderr.decode(sys.stderr.encoding, errors="replace") if proc.returncode != 0: bld.fatal( "genpybind returned {code} during the following call:" "\n{command}\n\n{stdout}\n\n{stderr}".format( code=proc.returncode, command=join_args(args), stdout=stdout, stderr=stderr, )) if stderr.strip(): Logs.debug("non-fatal warnings during genpybind run:\n{}".format(stderr)) return stdout def _include_paths(self): return self.generator.to_incnodes(self.includes + self.env.INCLUDES) def _inputs_as_relative_includes(self): include_paths = self._include_paths() relative_includes = [] for node in self.inputs: for inc in include_paths: if node.is_child_of(inc): relative_includes.append(node.path_from(inc)) break else: self.generator.bld.fatal("could not resolve {}".format(node)) return relative_includes def _arguments(self, genpybind_parse=None, resource_dir=None): args = [] relative_includes = self._inputs_as_relative_includes() is_cxx = "cxx" in self.features # options for genpybind args.extend(["--genpybind-module", self.module]) if self.genpybind_tags: args.extend(["--genpybind-tag"] + self.genpybind_tags) if relative_includes: args.extend(["--genpybind-include"] + relative_includes) if genpybind_parse: args.extend(["--genpybind-parse", genpybind_parse]) args.append("--") # headers to be processed by genpybind args.extend(node.abspath() for node in self.inputs) args.append("--") # options for clang/genpybind-parse args.append("-D__GENPYBIND__") args.append("-xc++" if is_cxx else "-xc") has_std_argument = False for flag in self.env["CXXFLAGS" if is_cxx else "CFLAGS"]: flag = flag.replace("-std=gnu", "-std=c") if flag.startswith("-std=c"): has_std_argument = True args.append(flag) if not has_std_argument: args.append("-std=c++14") args.extend("-I{}".format(n.abspath()) for n in self._include_paths()) args.extend("-D{}".format(p) for p in self.env.DEFINES) # point to clang resource dir, if specified if resource_dir: args.append("-resource-dir={}".format(resource_dir)) return args ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/gob2.py0000660000000000000000000000047200000000000022121 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 from waflib import TaskGen TaskGen.declare_chain( name = 'gob2', rule = '${GOB2} -o ${TGT[0].bld_dir()} ${GOB2FLAGS} ${SRC}', ext_in = '.gob', ext_out = '.c' ) def configure(conf): 
conf.find_program('gob2', var='GOB2') conf.env['GOB2FLAGS'] = '' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/halide.py0000660000000000000000000000762300000000000022523 0ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Halide code generation tool __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ Tool to run `Halide `_ code generators. Usage:: bld( name='pipeline', # ^ Reference this in use="..." for things using the generated code #target=['pipeline.o', 'pipeline.h'] # ^ by default, name.{o,h} is added, but you can set the outputs here features='halide', halide_env="HL_TRACE=1 HL_TARGET=host-opencl-gpu_debug", # ^ Environment passed to the generator, # can be a dict, k/v list, or string. args=[], # ^ Command-line arguments to the generator (optional), # eg. to give parameters to the scheduling source='pipeline_gen', # ^ Name of the source executable ) Known issues: - Currently only supports Linux (no ".exe") - Doesn't rerun on input modification when input is part of a build chain, and has been modified externally. """ import os from waflib import Task, Utils, Options, TaskGen, Errors class run_halide_gen(Task.Task): color = 'CYAN' vars = ['HALIDE_ENV', 'HALIDE_ARGS'] run_str = "${SRC[0].abspath()} ${HALIDE_ARGS}" def __str__(self): stuff = "halide" stuff += ("[%s]" % (",".join( ('%s=%s' % (k,v)) for k, v in sorted(self.env.env.items())))) return Task.Task.__str__(self).replace(self.__class__.__name__, stuff) @TaskGen.feature('halide') @TaskGen.before_method('process_source') def halide(self): Utils.def_attrs(self, args=[], halide_env={}, ) bld = self.bld env = self.halide_env try: if isinstance(env, str): env = dict(x.split('=') for x in env.split()) elif isinstance(env, list): env = dict(x.split('=') for x in env) assert isinstance(env, dict) except Exception as e: if not isinstance(e, ValueError) \ and not isinstance(e, AssertionError): raise raise Errors.WafError( "halide_env must be under the form" \ " {'HL_x':'a', 'HL_y':'b'}" \ " or ['HL_x=y', 'HL_y=b']" \ " or 'HL_x=y HL_y=b'") src = self.to_nodes(self.source) assert len(src) == 1, "Only one source expected" src = src[0] args = Utils.to_list(self.args) def change_ext(src, ext): # Return a node with a new extension, in an appropriate folder name = src.name xpos = src.name.rfind('.') if xpos == -1: xpos = len(src.name) newname = name[:xpos] + ext if src.is_child_of(bld.bldnode): node = src.get_src().parent.find_or_declare(newname) else: node = bld.bldnode.find_or_declare(newname) return node def to_nodes(self, lst, path=None): tmp = [] path = path or self.path find = path.find_or_declare if isinstance(lst, self.path.__class__): lst = [lst] for x in Utils.to_list(lst): if isinstance(x, str): node = find(x) else: node = x tmp.append(node) return tmp tgt = to_nodes(self, self.target) if not tgt: tgt = [change_ext(src, '.o'), change_ext(src, '.h')] cwd = tgt[0].parent.abspath() task = self.create_task('run_halide_gen', src, tgt, cwd=cwd) task.env.append_unique('HALIDE_ARGS', args) if task.env.env == []: task.env.env = {} task.env.env.update(env) task.env.HALIDE_ENV = " ".join(("%s=%s" % (k,v)) for (k,v) in sorted(env.items())) task.env.HALIDE_ARGS = args try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] self.source = [] def configure(conf): if Options.options.halide_root is None: conf.check_cfg(package='Halide', 
args='--cflags --libs') else: halide_root = Options.options.halide_root conf.env.INCLUDES_HALIDE = [ os.path.join(halide_root, "include") ] conf.env.LIBPATH_HALIDE = [ os.path.join(halide_root, "lib") ] conf.env.LIB_HALIDE = ["Halide"] # You might want to add this, while upstream doesn't fix it #conf.env.LIB_HALIDE += ['ncurses', 'dl', 'pthread'] def options(opt): opt.add_option('--halide-root', help="path to Halide include and lib files", ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/extras/javatest.py0000770000000000000000000002025700000000000023116 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Federico Pellegrin, 2019 (fedepell) """ Provides Java Unit test support using :py:class:`waflib.Tools.waf_unit_test.utest` task via the **javatest** feature. This gives the possibility to run unit test and have them integrated into the standard waf unit test environment. It has been tested with TestNG and JUnit but should be easily expandable to other frameworks given the flexibility of ut_str provided by the standard waf unit test environment. The extra takes care also of managing non-java dependencies (ie. C/C++ libraries using JNI or Python modules via JEP) and setting up the environment needed to run them. Example usage: def options(opt): opt.load('java waf_unit_test javatest') def configure(conf): conf.load('java javatest') def build(bld): [ ... mainprog is built here ... ] bld(features = 'javac javatest', srcdir = 'test/', outdir = 'test', sourcepath = ['test'], classpath = [ 'src' ], basedir = 'test', use = ['JAVATEST', 'mainprog'], # mainprog is the program being tested in src/ ut_str = 'java -cp ${CLASSPATH} ${JTRUNNER} ${SRC}', jtest_source = bld.path.ant_glob('test/*.xml'), ) At command line the CLASSPATH where to find the testing environment and the test runner (default TestNG) that will then be seen in the environment as CLASSPATH_JAVATEST (then used for use) and JTRUNNER and can be used for dependencies and ut_str generation. Example configure for TestNG: waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar --jtrunner=org.testng.TestNG or as default runner is TestNG: waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar Example configure for JUnit: waf configure --jtpath=/tmp/junit.jar --jtrunner=org.junit.runner.JUnitCore The runner class presence on the system is checked for at configuration stage. """ import os from waflib import Task, TaskGen, Options, Errors, Utils, Logs from waflib.Tools import ccroot JAR_RE = '**/*' def _process_use_rec(self, name): """ Recursively process ``use`` for task generator with name ``name``.. Used by javatest_process_use. """ if name in self.javatest_use_not or name in self.javatest_use_seen: return try: tg = self.bld.get_tgen_by_name(name) except Errors.WafError: self.javatest_use_not.add(name) return self.javatest_use_seen.append(name) tg.post() for n in self.to_list(getattr(tg, 'use', [])): _process_use_rec(self, n) @TaskGen.feature('javatest') @TaskGen.after_method('process_source', 'apply_link', 'use_javac_files') def javatest_process_use(self): """ Process the ``use`` attribute which contains a list of task generator names and store paths that later is used to populate the unit test runtime environment. 
""" self.javatest_use_not = set() self.javatest_use_seen = [] self.javatest_libpaths = [] # strings or Nodes self.javatest_pypaths = [] # strings or Nodes self.javatest_dep_nodes = [] names = self.to_list(getattr(self, 'use', [])) for name in names: _process_use_rec(self, name) def extend_unique(lst, varlst): ext = [] for x in varlst: if x not in lst: ext.append(x) lst.extend(ext) # Collect type specific info needed to construct a valid runtime environment # for the test. for name in self.javatest_use_seen: tg = self.bld.get_tgen_by_name(name) # Python-Java embedding crosstools such as JEP if 'py' in tg.features: # Python dependencies are added to PYTHONPATH pypath = getattr(tg, 'install_from', tg.path) if 'buildcopy' in tg.features: # Since buildcopy is used we assume that PYTHONPATH in build should be used, # not source extend_unique(self.javatest_pypaths, [pypath.get_bld().abspath()]) # Add buildcopy output nodes to dependencies extend_unique(self.javatest_dep_nodes, [o for task in getattr(tg, 'tasks', []) for o in getattr(task, 'outputs', [])]) else: # If buildcopy is not used, depend on sources instead extend_unique(self.javatest_dep_nodes, tg.source) extend_unique(self.javatest_pypaths, [pypath.abspath()]) if getattr(tg, 'link_task', None): # For tasks with a link_task (C, C++, D et.c.) include their library paths: if not isinstance(tg.link_task, ccroot.stlink_task): extend_unique(self.javatest_dep_nodes, tg.link_task.outputs) extend_unique(self.javatest_libpaths, tg.link_task.env.LIBPATH) if 'pyext' in tg.features: # If the taskgen is extending Python we also want to add the interpreter libpath. extend_unique(self.javatest_libpaths, tg.link_task.env.LIBPATH_PYEXT) else: # Only add to libpath if the link task is not a Python extension extend_unique(self.javatest_libpaths, [tg.link_task.outputs[0].parent.abspath()]) if 'javac' in tg.features or 'jar' in tg.features: if hasattr(tg, 'jar_task'): # For Java JAR tasks depend on generated JAR extend_unique(self.javatest_dep_nodes, tg.jar_task.outputs) else: # For Java non-JAR ones we need to glob generated files (Java output files are not predictable) if hasattr(tg, 'outdir'): base_node = tg.outdir else: base_node = tg.path.get_bld() self.javatest_dep_nodes.extend([dx for dx in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) @TaskGen.feature('javatest') @TaskGen.after_method('apply_java', 'use_javac_files', 'set_classpath', 'javatest_process_use') def make_javatest(self): """ Creates a ``utest`` task with a populated environment for Java Unit test execution """ tsk = self.create_task('utest') tsk.set_run_after(self.javac_task) # Dependencies from recursive use analysis tsk.dep_nodes.extend(self.javatest_dep_nodes) # Put test input files as waf_unit_test relies on that for some prints and log generation # If jtest_source is there, this is specially useful for passing XML for TestNG # that contain test specification, use that as inputs, otherwise test sources if getattr(self, 'jtest_source', None): tsk.inputs = self.to_nodes(self.jtest_source) else: if self.javac_task.srcdir[0].exists(): tsk.inputs = self.javac_task.srcdir[0].ant_glob('**/*.java', remove=False) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = lst + tsk.vars if getattr(self, 'ut_cwd', None): if isinstance(self.ut_cwd, str): # we want a Node instance if os.path.isabs(self.ut_cwd): self.ut_cwd = self.bld.root.make_node(self.ut_cwd) else: self.ut_cwd = self.path.make_node(self.ut_cwd) 
else: self.ut_cwd = self.bld.bldnode # Get parent CLASSPATH and add output dir of test, we run from wscript dir # We have to change it from list to the standard java -cp format (: separated) tsk.env.CLASSPATH = ':'.join(self.env.CLASSPATH) + ':' + self.outdir.abspath() if not self.ut_cwd.exists(): self.ut_cwd.mkdir() if not hasattr(self, 'ut_env'): self.ut_env = dict(os.environ) def add_paths(var, lst): # Add list of paths to a variable, lst can contain strings or nodes lst = [ str(n) for n in lst ] Logs.debug("ut: %s: Adding paths %s=%s", self, var, lst) self.ut_env[var] = os.pathsep.join(lst) + os.pathsep + self.ut_env.get(var, '') add_paths('PYTHONPATH', self.javatest_pypaths) if Utils.is_win32: add_paths('PATH', self.javatest_libpaths) elif Utils.unversioned_sys_platform() == 'darwin': add_paths('DYLD_LIBRARY_PATH', self.javatest_libpaths) add_paths('LD_LIBRARY_PATH', self.javatest_libpaths) else: add_paths('LD_LIBRARY_PATH', self.javatest_libpaths) def configure(ctx): cp = ctx.env.CLASSPATH or '.' if getattr(Options.options, 'jtpath', None): ctx.env.CLASSPATH_JAVATEST = getattr(Options.options, 'jtpath').split(':') cp += ':' + getattr(Options.options, 'jtpath') if getattr(Options.options, 'jtrunner', None): ctx.env.JTRUNNER = getattr(Options.options, 'jtrunner') if ctx.check_java_class(ctx.env.JTRUNNER, with_classpath=cp): ctx.fatal('Could not run test class %r' % ctx.env.JTRUNNER) def options(opt): opt.add_option('--jtpath', action='store', default='', dest='jtpath', help='Path to jar(s) needed for javatest execution, colon separated, if not in the system CLASSPATH') opt.add_option('--jtrunner', action='store', default='org.testng.TestNG', dest='jtrunner', help='Class to run javatest test [default: org.testng.TestNG]') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/kde4.py0000660000000000000000000000524300000000000022120 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Support for the KDE4 libraries and msgfmt """ import os, re from waflib import Task, Utils from waflib.TaskGen import feature @feature('msgfmt') def apply_msgfmt(self): """ Process all languages to create .mo files and to install them:: def build(bld): bld(features='msgfmt', langs='es de fr', appname='myapp', install_path='${KDE4_LOCALE_INSTALL_DIR}') """ for lang in self.to_list(self.langs): node = self.path.find_resource(lang+'.po') task = self.create_task('msgfmt', node, node.change_ext('.mo')) langname = lang.split('/') langname = langname[-1] inst = getattr(self, 'install_path', '${KDE4_LOCALE_INSTALL_DIR}') self.add_install_as( inst_to = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + getattr(self, 'appname', 'set_your_appname') + '.mo', inst_from = task.outputs[0], chmod = getattr(self, 'chmod', Utils.O644)) class msgfmt(Task.Task): """ Transform .po files into .mo files """ color = 'BLUE' run_str = '${MSGFMT} ${SRC} -o ${TGT}' def configure(self): """ Detect kde4-config and set various variables for the *use* system:: def options(opt): opt.load('compiler_cxx kde4') def configure(conf): conf.load('compiler_cxx kde4') def build(bld): bld.program(source='main.c', target='app', use='KDECORE KIO KHTML') """ kdeconfig = self.find_program('kde4-config') prefix = self.cmd_and_log(kdeconfig + ['--prefix']).strip() fname = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix try: os.stat(fname) except OSError: fname = 
'%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix try: os.stat(fname) except OSError: self.fatal('could not open %s' % fname) try: txt = Utils.readf(fname) except EnvironmentError: self.fatal('could not read %s' % fname) txt = txt.replace('\\\n', '\n') fu = re.compile('#(.*)\n') txt = fu.sub('', txt) setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)') found = setregexp.findall(txt) for (_, key, val) in found: #print key, val self.env[key] = val # well well, i could just write an interpreter for cmake files self.env['LIB_KDECORE']= ['kdecore'] self.env['LIB_KDEUI'] = ['kdeui'] self.env['LIB_KIO'] = ['kio'] self.env['LIB_KHTML'] = ['khtml'] self.env['LIB_KPARTS'] = ['kparts'] self.env['LIBPATH_KDECORE'] = [os.path.join(self.env.KDE4_LIB_INSTALL_DIR, 'kde4', 'devel'), self.env.KDE4_LIB_INSTALL_DIR] self.env['INCLUDES_KDECORE'] = [self.env['KDE4_INCLUDE_INSTALL_DIR']] self.env.append_value('INCLUDES_KDECORE', [self.env['KDE4_INCLUDE_INSTALL_DIR']+ os.sep + 'KDE']) self.find_program('msgfmt', var='MSGFMT') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/extras/local_rpath.py0000660000000000000000000000112200000000000023551 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) import copy from waflib.TaskGen import after_method, feature @after_method('propagate_uselib_vars') @feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib', 'fcprogram', 'fcshlib') def add_rpath_stuff(self): all = copy.copy(self.to_list(getattr(self, 'use', []))) while all: name = all.pop() try: tg = self.bld.get_tgen_by_name(name) except: continue if hasattr(tg, 'link_task'): self.env.append_value('RPATH', tg.link_task.outputs[0].parent.abspath()) all.extend(self.to_list(getattr(tg, 'use', []))) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/make.py0000660000000000000000000000620200000000000022202 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ A make-like way of executing the build, following the relationships between inputs/outputs This algorithm will lead to slower builds, will not be as flexible as "waf build", but it might be useful for building data files (?) 
It is likely to break in the following cases: - files are created dynamically (no inputs or outputs) - headers - building two files from different groups """ import re from waflib import Options, Task from waflib.Build import BuildContext class MakeContext(BuildContext): '''executes tasks in a step-by-step manner, following dependencies between inputs/outputs''' cmd = 'make' fun = 'build' def __init__(self, **kw): super(MakeContext, self).__init__(**kw) self.files = Options.options.files def get_build_iterator(self): if not self.files: while 1: yield super(MakeContext, self).get_build_iterator() for g in self.groups: for tg in g: try: f = tg.post except AttributeError: pass else: f() provides = {} uses = {} all_tasks = [] tasks = [] for pat in self.files.split(','): matcher = self.get_matcher(pat) for tg in g: if isinstance(tg, Task.Task): lst = [tg] else: lst = tg.tasks for tsk in lst: all_tasks.append(tsk) do_exec = False for node in tsk.inputs: try: uses[node].append(tsk) except: uses[node] = [tsk] if matcher(node, output=False): do_exec = True break for node in tsk.outputs: try: provides[node].append(tsk) except: provides[node] = [tsk] if matcher(node, output=True): do_exec = True break if do_exec: tasks.append(tsk) # so we have the tasks that we need to process, the list of all tasks, # the map of the tasks providing nodes, and the map of tasks using nodes if not tasks: # if there are no tasks matching, return everything in the current group result = all_tasks else: # this is like a big filter... result = set() seen = set() cur = set(tasks) while cur: result |= cur tosee = set() for tsk in cur: for node in tsk.inputs: if node in seen: continue seen.add(node) tosee |= set(provides.get(node, [])) cur = tosee result = list(result) Task.set_file_constraints(result) Task.set_precedence_constraints(result) yield result while 1: yield [] def get_matcher(self, pat): # this returns a function inn = True out = True if pat.startswith('in:'): out = False pat = pat.replace('in:', '') elif pat.startswith('out:'): inn = False pat = pat.replace('out:', '') anode = self.root.find_node(pat) pattern = None if not anode: if not pat.startswith('^'): pat = '^.+?%s' % pat if not pat.endswith('$'): pat = '%s$' % pat pattern = re.compile(pat) def match(node, output): if output and not out: return False if not output and not inn: return False if anode: return anode == node else: return pattern.match(node.abspath()) return match ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/midl.py0000660000000000000000000000324300000000000022214 0ustar00rootroot00000000000000#!/usr/bin/env python # Issue 1185 ultrix gmail com """ Microsoft Interface Definition Language support. 
Given ComObject.idl, this tool will generate ComObject.tlb ComObject_i.h ComObject_i.c ComObject_p.c and dlldata.c To declare targets using midl:: def configure(conf): conf.load('msvc') conf.load('midl') def build(bld): bld( features='c cshlib', # Note: ComObject_i.c is generated from ComObject.idl source = 'main.c ComObject.idl ComObject_i.c', target = 'ComObject.dll') """ from waflib import Task, Utils from waflib.TaskGen import feature, before_method import os def configure(conf): conf.find_program(['midl'], var='MIDL') conf.env.MIDLFLAGS = [ '/nologo', '/D', '_DEBUG', '/W1', '/char', 'signed', '/Oicf', ] @feature('c', 'cxx') @before_method('process_source') def idl_file(self): # Do this before process_source so that the generated header can be resolved # when scanning source dependencies. idl_nodes = [] src_nodes = [] for node in Utils.to_list(self.source): if str(node).endswith('.idl'): idl_nodes.append(node) else: src_nodes.append(node) for node in self.to_nodes(idl_nodes): t = node.change_ext('.tlb') h = node.change_ext('_i.h') c = node.change_ext('_i.c') p = node.change_ext('_p.c') d = node.parent.find_or_declare('dlldata.c') self.create_task('midl', node, [t, h, c, p, d]) self.source = src_nodes class midl(Task.Task): """ Compile idl files """ color = 'YELLOW' run_str = '${MIDL} ${MIDLFLAGS} ${CPPPATH_ST:INCLUDES} /tlb ${TGT[0].bldpath()} /header ${TGT[1].bldpath()} /iid ${TGT[2].bldpath()} /proxy ${TGT[3].bldpath()} /dlldata ${TGT[4].bldpath()} ${SRC}' before = ['winrc'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5417175 tevent-0.11.0/third_party/waf/waflib/extras/msvc_pdb.py0000660000000000000000000000247300000000000023070 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Rafaël Kooi 2019 from waflib import TaskGen @TaskGen.feature('c', 'cxx', 'fc') @TaskGen.after_method('propagate_uselib_vars') def add_pdb_per_object(self): """For msvc/fortran, specify a unique compile pdb per object, to work around LNK4099. Flags are updated with a unique /Fd flag based on the task output name. This is separate from the link pdb. 
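Usage sketch (one option is to load the tool at configure time; it then applies
automatically to every c/cxx/fc task generator, and only acts on tasks whose
flags already contain /Zi)::

	def configure(conf):
		conf.load('compiler_cxx msvc_pdb')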
""" if not hasattr(self, 'compiled_tasks'): return link_task = getattr(self, 'link_task', None) for task in self.compiled_tasks: if task.inputs and task.inputs[0].name.lower().endswith('.rc'): continue add_pdb = False for flagname in ('CFLAGS', 'CXXFLAGS', 'FCFLAGS'): # several languages may be used at once for flag in task.env[flagname]: if flag[1:].lower() == 'zi': add_pdb = True break if add_pdb: node = task.outputs[0].change_ext('.pdb') pdb_flag = '/Fd:' + node.abspath() for flagname in ('CFLAGS', 'CXXFLAGS', 'FCFLAGS'): buf = [pdb_flag] for flag in task.env[flagname]: if flag[1:3] == 'Fd' or flag[1:].lower() == 'fs' or flag[1:].lower() == 'mp': continue buf.append(flag) task.env[flagname] = buf if link_task and not node in link_task.dep_nodes: link_task.dep_nodes.append(node) if not node in task.outputs: task.outputs.append(node) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/msvcdeps.py0000660000000000000000000001766700000000000023132 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Copyright Garmin International or its subsidiaries, 2012-2013 ''' Off-load dependency scanning from Python code to MSVC compiler This tool is safe to load in any environment; it will only activate the MSVC exploits when it finds that a particular taskgen uses MSVC to compile. Empirical testing shows about a 10% execution time savings from using this tool as compared to c_preproc. The technique of gutting scan() and pushing the dependency calculation down to post_run() is cribbed from gccdeps.py. This affects the cxx class, so make sure to load Qt5 after this tool. Usage:: def options(opt): opt.load('compiler_cxx') def configure(conf): conf.load('compiler_cxx msvcdeps') ''' import os, sys, tempfile, threading from waflib import Context, Errors, Logs, Task, Utils from waflib.Tools import c_preproc, c, cxx, msvc from waflib.TaskGen import feature, before_method lock = threading.Lock() nodes = {} # Cache the path -> Node lookup PREPROCESSOR_FLAG = '/showIncludes' INCLUDE_PATTERN = 'Note: including file:' # Extensible by outside tools supported_compilers = ['msvc'] @feature('c', 'cxx') @before_method('process_source') def apply_msvcdeps_flags(taskgen): if taskgen.env.CC_NAME not in supported_compilers: return for flag in ('CFLAGS', 'CXXFLAGS'): if taskgen.env.get_flat(flag).find(PREPROCESSOR_FLAG) < 0: taskgen.env.append_value(flag, PREPROCESSOR_FLAG) def path_to_node(base_node, path, cached_nodes): ''' Take the base node and the path and return a node Results are cached because searching the node tree is expensive The following code is executed by threads, it is not safe, so a lock is needed... ''' # normalize the path because ant_glob() does not understand # parent path components (..) 
path = os.path.normpath(path) # normalize the path case to increase likelihood of a cache hit path = os.path.normcase(path) # ant_glob interprets [] and () characters, so those must be replaced path = path.replace('[', '?').replace(']', '?').replace('(', '[(]').replace(')', '[)]') node_lookup_key = (base_node, path) try: node = cached_nodes[node_lookup_key] except KeyError: # retry with lock on cache miss with lock: try: node = cached_nodes[node_lookup_key] except KeyError: node_list = base_node.ant_glob([path], ignorecase=True, remove=False, quiet=True, regex=False) node = cached_nodes[node_lookup_key] = node_list[0] if node_list else None return node def post_run(self): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).post_run() # TODO this is unlikely to work with netcache if getattr(self, 'cached', None): return Task.Task.post_run(self) bld = self.generator.bld unresolved_names = [] resolved_nodes = [] # Dynamically bind to the cache try: cached_nodes = bld.cached_nodes except AttributeError: cached_nodes = bld.cached_nodes = {} for path in self.msvcdeps_paths: node = None if os.path.isabs(path): node = path_to_node(bld.root, path, cached_nodes) else: # when calling find_resource, make sure the path does not begin with '..' base_node = bld.bldnode path = [k for k in Utils.split_path(path) if k and k != '.'] while path[0] == '..': path.pop(0) base_node = base_node.parent path = os.sep.join(path) node = path_to_node(base_node, path, cached_nodes) if not node: raise ValueError('could not find %r for %r' % (path, self)) else: if not c_preproc.go_absolute: if not (node.is_child_of(bld.srcnode) or node.is_child_of(bld.bldnode)): # System library Logs.debug('msvcdeps: Ignoring system include %r', node) continue if id(node) == id(self.inputs[0]): # Self-dependency continue resolved_nodes.append(node) bld.node_deps[self.uid()] = resolved_nodes bld.raw_deps[self.uid()] = unresolved_names try: del self.cache_sig except AttributeError: pass Task.Task.post_run(self) def scan(self): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).scan() resolved_nodes = self.generator.bld.node_deps.get(self.uid(), []) unresolved_names = [] return (resolved_nodes, unresolved_names) def sig_implicit_deps(self): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).sig_implicit_deps() bld = self.generator.bld try: return self.compute_sig_implicit_deps() except Errors.TaskNotReady: raise ValueError("Please specify the build order precisely with msvcdeps (c/c++ tasks)") except EnvironmentError: # If a file is renamed, assume the dependencies are stale and must be recalculated for x in bld.node_deps.get(self.uid(), []): if not x.is_bld() and not x.exists(): try: del x.parent.children[x.name] except KeyError: pass key = self.uid() bld.node_deps[key] = [] bld.raw_deps[key] = [] return Utils.SIG_NIL def exec_command(self, cmd, **kw): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).exec_command(cmd, **kw) if not 'cwd' in kw: kw['cwd'] = self.get_cwd() if self.env.PATH: env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH) # The Visual Studio IDE adds an environment variable that causes # the MS compiler to send its textual output directly to the # debugging window rather than normal stdout/stderr. 
# # This is unrecoverably bad for this tool because it will cause # all the dependency scanning to see an empty stdout stream and # assume that the file being compiled uses no headers. # # See http://blogs.msdn.com/b/freik/archive/2006/04/05/569025.aspx # # Attempting to repair the situation by deleting the offending # envvar at this point in tool execution will not be good enough-- # its presence poisons the 'waf configure' step earlier. We just # want to put a sanity check here in order to help developers # quickly diagnose the issue if an otherwise-good Waf tree # is then executed inside the MSVS IDE. assert 'VS_UNICODE_OUTPUT' not in kw['env'] cmd, args = self.split_argfile(cmd) try: (fd, tmp) = tempfile.mkstemp() os.write(fd, '\r\n'.join(args).encode()) os.close(fd) self.msvcdeps_paths = [] kw['env'] = kw.get('env', os.environ.copy()) kw['cwd'] = kw.get('cwd', os.getcwd()) kw['quiet'] = Context.STDOUT kw['output'] = Context.STDOUT out = [] if Logs.verbose: Logs.debug('argfile: @%r -> %r', tmp, args) try: raw_out = self.generator.bld.cmd_and_log(cmd + ['@' + tmp], **kw) ret = 0 except Errors.WafError as e: # Use e.msg if e.stdout is not set raw_out = getattr(e, 'stdout', e.msg) # Return non-zero error code even if we didn't # get one from the exception object ret = getattr(e, 'returncode', 1) Logs.debug('msvcdeps: Running for: %s' % self.inputs[0]) for line in raw_out.splitlines(): if line.startswith(INCLUDE_PATTERN): # Only strip whitespace after log to preserve # dependency structure in debug output inc_path = line[len(INCLUDE_PATTERN):] Logs.debug('msvcdeps: Regex matched %s', inc_path) self.msvcdeps_paths.append(inc_path.strip()) else: out.append(line) # Pipe through the remaining stdout content (not related to /showIncludes) if self.generator.bld.logger: self.generator.bld.logger.debug('out: %s' % os.linesep.join(out)) else: sys.stdout.write(os.linesep.join(out) + os.linesep) return ret finally: try: os.remove(tmp) except OSError: # anti-virus and indexers can keep files open -_- pass def wrap_compiled_task(classname): derived_class = type(classname, (Task.classes[classname],), {}) derived_class.derived_msvcdeps = derived_class derived_class.post_run = post_run derived_class.scan = scan derived_class.sig_implicit_deps = sig_implicit_deps derived_class.exec_command = exec_command for k in ('c', 'cxx'): if k in Task.classes: wrap_compiled_task(k) def options(opt): raise ValueError('Do not load msvcdeps options') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/msvs.py0000660000000000000000000007452200000000000022267 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Avalanche Studios 2009-2011 # Thomas Nagy 2011 """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ To add this tool to your project: def options(conf): opt.load('msvs') It can be a good idea to add the sync_exec tool too. To generate solution files: $ waf configure msvs To customize the outputs, provide subclasses in your wscript files:: from waflib.extras import msvs class vsnode_target(msvs.vsnode_target): def get_build_command(self, props): # likely to be required return "waf.bat build" def collect_source(self): # likely to be required ... class msvs_bar(msvs.msvs_generator): def init(self): msvs.msvs_generator.init(self) self.vsnode_target = vsnode_target The msvs class re-uses the same build() function for reading the targets (task generators), you may therefore specify msvs settings on the context object:: def build(bld): bld.solution_name = 'foo.sln' bld.waf_command = 'waf.bat' bld.projects_dir = bld.srcnode.make_node('.depproj') bld.projects_dir.mkdir() For visual studio 2008, the command is called 'msvs2008', and the classes such as vsnode_target are wrapped by a decorator class 'wrap_2008' to provide special functionality. To customize platform toolsets, pass additional parameters, for example:: class msvs_2013(msvs.msvs_generator): cmd = 'msvs2013' numver = '13.00' vsver = '2013' platform_toolset_ver = 'v120' ASSUMPTIONS: * a project can be either a directory or a target, vcxproj files are written only for targets that have source files * each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path """ import os, re, sys import uuid # requires python 2.5 from waflib.Build import BuildContext from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' PROJECT_TEMPLATE = r''' ${for b in project.build_properties} ${b.configuration} ${b.platform} ${endfor} {${project.uuid}} MakeFileProj ${project.name} ${for b in project.build_properties} Makefile ${b.outdir} ${project.platform_toolset_ver} ${endfor} ${for b in project.build_properties} ${endfor} ${for b in project.build_properties} ${xml:project.get_build_command(b)} ${xml:project.get_rebuild_command(b)} ${xml:project.get_clean_command(b)} ${xml:b.includes_search_path} ${xml:b.preprocessor_definitions};$(NMakePreprocessorDefinitions) ${xml:b.includes_search_path} $(ExecutablePath) ${if getattr(b, 'output_file', None)} ${xml:b.output_file} ${endif} ${if getattr(b, 'deploy_dir', None)} ${xml:b.deploy_dir} ${endif} ${endfor} ${for b in project.build_properties} ${if getattr(b, 'deploy_dir', None)} CopyToHardDrive ${endif} ${endfor} ${for x in project.source} <${project.get_key(x)} Include='${x.win32path()}' /> ${endfor} ''' FILTER_TEMPLATE = ''' ${for x in project.source} <${project.get_key(x)} Include="${x.win32path()}"> ${project.get_filter_name(x.parent)} ${endfor} ${for x in project.dirs()} {${project.make_uuid(x.win32path())}} ${endfor} 
''' PROJECT_2008_TEMPLATE = r''' ${if project.build_properties} ${for b in project.build_properties} ${endfor} ${else} ${endif} ${if project.build_properties} ${for b in project.build_properties} ${endfor} ${else} ${endif} ${project.display_filter()} ''' SOLUTION_TEMPLATE = '''Microsoft Visual Studio Solution File, Format Version ${project.numver} # Visual Studio ${project.vsver} ${for p in project.all_projects} Project("{${p.ptype()}}") = "${p.name}", "${p.title}", "{${p.uuid}}" EndProject${endfor} Global GlobalSection(SolutionConfigurationPlatforms) = preSolution ${if project.all_projects} ${for (configuration, platform) in project.all_projects[0].ctx.project_configurations()} ${configuration}|${platform} = ${configuration}|${platform} ${endfor} ${endif} EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution ${for p in project.all_projects} ${if hasattr(p, 'source')} ${for b in p.build_properties} {${p.uuid}}.${b.configuration}|${b.platform}.ActiveCfg = ${b.configuration}|${b.platform} ${if getattr(p, 'is_active', None)} {${p.uuid}}.${b.configuration}|${b.platform}.Build.0 = ${b.configuration}|${b.platform} ${endif} ${if getattr(p, 'is_deploy', None)} {${p.uuid}}.${b.configuration}|${b.platform}.Deploy.0 = ${b.configuration}|${b.platform} ${endif} ${endfor} ${endif} ${endfor} EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution ${for p in project.all_projects} ${if p.parent} {${p.uuid}} = {${p.parent.uuid}} ${endif} ${endfor} EndGlobalSection EndGlobal ''' COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") %s #f = open('cmd.txt', 'w') #f.write(str(lst)) #f.close() return ''.join(lst) ''' reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P[^}]*?)\})", re.M) def compile_template(line): """ Compile a template expression into a python function (like jsps, but way shorter) """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith(('if', 'for')): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith(('endif', 'endfor')): indent -= 1 elif f.startswith(('else', 'elif')): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(%s)' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) #print(fun) return Task.funex(fun) re_blank = re.compile('(\n|\r|\\s)*\n', re.M) def rm_blank_lines(txt): txt = re_blank.sub('\r\n', txt) return txt BOM = '\xef\xbb\xbf' try: BOM = bytes(BOM, 'latin-1') # python 3 except TypeError: pass def stealth_write(self, data, flags='wb'): try: unicode except NameError: data = data.encode('utf-8') # python 3 else: data = data.decode(sys.getfilesystemencoding(), 'replace') data = data.encode('utf-8') if self.name.endswith(('.vcproj', '.vcxproj')): data = BOM + data try: txt = self.read(flags='rb') if txt != data: raise ValueError('must write') except (IOError, 
ValueError): self.write(data, flags=flags) else: Logs.debug('msvs: skipping %s', self.win32path()) Node.Node.stealth_write = stealth_write re_win32 = re.compile(r'^([/\\]cygdrive)?[/\\]([a-z])([^a-z0-9_-].*)', re.I) def win32path(self): p = self.abspath() m = re_win32.match(p) if m: return "%s:%s" % (m.group(2).upper(), m.group(3)) return p Node.Node.win32path = win32path re_quote = re.compile("[^a-zA-Z0-9-]") def quote(s): return re_quote.sub("_", s) def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") def make_uuid(v, prefix = None): """ simple utility function """ if isinstance(v, dict): keys = list(v.keys()) keys.sort() tmp = str([(k, v[k]) for k in keys]) else: tmp = str(v) d = Utils.md5(tmp.encode()).hexdigest().upper() if prefix: d = '%s%s' % (prefix, d[8:]) gid = uuid.UUID(d, version = 4) return str(gid).upper() def diff(node, fromnode): # difference between two nodes, but with "(..)" instead of ".." c1 = node c2 = fromnode c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while id(c1) != id(c2): lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent for i in range(up): lst.append('(..)') lst.reverse() return tuple(lst) class build_property(object): pass class vsnode(object): """ Abstract class representing visual studio elements We assume that all visual studio nodes have a uuid and a parent """ def __init__(self, ctx): self.ctx = ctx # msvs context self.name = '' # string, mandatory self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) self.uuid = '' # string, mandatory self.parent = None # parent node for visual studio nesting def get_waf(self): """ Override in subclasses... """ return 'cd /d "%s" & %s' % (self.ctx.srcnode.win32path(), getattr(self.ctx, 'waf_command', 'waf.bat')) def ptype(self): """ Return a special uuid for projects written in the solution file """ pass def write(self): """ Write the project file, by default, do nothing """ pass def make_uuid(self, val): """ Alias for creating uuid values easily (the templates cannot access global variables) """ return make_uuid(val) class vsnode_vsdir(vsnode): """ Nodes representing visual studio folders (which do not match the filesystem tree!) 
""" VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" def __init__(self, ctx, uuid, name, vspath=''): vsnode.__init__(self, ctx) self.title = self.name = name self.uuid = uuid self.vspath = vspath or name def ptype(self): return self.VS_GUID_SOLUTIONFOLDER class vsnode_project(vsnode): """ Abstract class representing visual studio project elements A project is assumed to be writable, and has a node representing the file to write to """ VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" def ptype(self): return self.VS_GUID_VCPROJ def __init__(self, ctx, node): vsnode.__init__(self, ctx) self.path = node self.uuid = make_uuid(node.win32path()) self.name = node.name self.platform_toolset_ver = getattr(ctx, 'platform_toolset_ver', None) self.title = self.path.win32path() self.source = [] # list of node objects self.build_properties = [] # list of properties (nmake commands, output dir, etc) def dirs(self): """ Get the list of parent folders of the source files (header files included) for writing the filters """ lst = [] def add(x): if x.height() > self.tg.path.height() and x not in lst: lst.append(x) add(x.parent) for x in self.source: add(x.parent) return lst def write(self): Logs.debug('msvs: creating %r', self.path) # first write the project file template1 = compile_template(PROJECT_TEMPLATE) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) # then write the filter template2 = compile_template(FILTER_TEMPLATE) filter_str = template2(self) filter_str = rm_blank_lines(filter_str) tmp = self.path.parent.make_node(self.path.name + '.filters') tmp.stealth_write(filter_str) def get_key(self, node): """ required for writing the source files """ name = node.name if name.endswith(('.cpp', '.c')): return 'ClCompile' return 'ClInclude' def collect_properties(self): """ Returns a list of triplet (configuration, platform, output_directory) """ ret = [] for c in self.ctx.configurations: for p in self.ctx.platforms: x = build_property() x.outdir = '' x.configuration = c x.platform = p x.preprocessor_definitions = '' x.includes_search_path = '' # can specify "deploy_dir" too ret.append(x) self.build_properties = ret def get_build_params(self, props): opt = '--execsolution=%s' % self.ctx.get_solution_node().win32path() return (self.get_waf(), opt) def get_build_command(self, props): return "%s build %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build %s" % self.get_build_params(props) def get_filter_name(self, node): lst = diff(node, self.tg.path) return '\\'.join(lst) or '.' 
class vsnode_alias(vsnode_project): def __init__(self, ctx, node, name): vsnode_project.__init__(self, ctx, node) self.name = name self.output_file = '' class vsnode_build_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) This is the only alias enabled by default """ def __init__(self, ctx, node, name='build_all_projects'): vsnode_alias.__init__(self, ctx, node, name) self.is_active = True class vsnode_install_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make install" """ def __init__(self, ctx, node, name='install_all_projects'): vsnode_alias.__init__(self, ctx, node, name) def get_build_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build install %s" % self.get_build_params(props) class vsnode_project_view(vsnode_alias): """ Fake target used to emulate a file system view """ def __init__(self, ctx, node, name='project_view'): vsnode_alias.__init__(self, ctx, node, name) self.tg = self.ctx() # fake one, cannot remove self.exclude_files = Node.exclude_regs + ''' waf-2* waf3-2*/** .waf-2* .waf3-2*/** **/*.sdf **/*.suo **/*.ncb **/%s ''' % Options.lockfile def collect_source(self): # this is likely to be slow self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) def get_build_command(self, props): params = self.get_build_params(props) + (self.ctx.cmd,) return "%s %s %s" % params def get_clean_command(self, props): return "" def get_rebuild_command(self, props): return self.get_build_command(props) class vsnode_target(vsnode_project): """ Visual studio project representing a targets (programs, libraries, etc) and bound to a task generator """ def __init__(self, ctx, tg): """ A project is more or less equivalent to a file/folder """ base = getattr(ctx, 'projects_dir', None) or tg.path node = base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node vsnode_project.__init__(self, ctx, node) self.name = quote(tg.name) self.tg = tg # task generator def get_build_params(self, props): """ Override the default to add the target name """ opt = '--execsolution=%s' % self.ctx.get_solution_node().win32path() if getattr(self, 'tg', None): opt += " --targets=%s" % self.tg.name return (self.get_waf(), opt) def collect_source(self): tg = self.tg source_files = tg.to_nodes(getattr(tg, 'source', [])) include_dirs = Utils.to_list(getattr(tg, 'msvs_includes', [])) include_files = [] for x in include_dirs: if isinstance(x, str): x = tg.path.find_node(x) if x: lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] include_files.extend(lst) # remove duplicates self.source.extend(list(set(source_files + include_files))) self.source.sort(key=lambda x: x.win32path()) def collect_properties(self): """ Visual studio projects are associated with platforms and configurations (for building especially) """ super(vsnode_target, self).collect_properties() for x in self.build_properties: x.outdir = self.path.parent.win32path() x.preprocessor_definitions = '' x.includes_search_path = '' try: tsk = self.tg.link_task except AttributeError: pass else: x.output_file = tsk.outputs[0].win32path() x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) x.includes_search_path = ';'.join(self.tg.env.INCPATHS) class msvs_generator(BuildContext): '''generates a visual studio 2010 solution''' cmd = 'msvs' fun = 'build' numver = 
'11.00' # Visual Studio Version Number vsver = '2010' # Visual Studio Version Year platform_toolset_ver = 'v110' # Platform Toolset Version Number def init(self): """ Some data that needs to be present """ if not getattr(self, 'configurations', None): self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc if not getattr(self, 'platforms', None): self.platforms = ['Win32'] if not getattr(self, 'all_projects', None): self.all_projects = [] if not getattr(self, 'project_extension', None): self.project_extension = '.vcxproj' if not getattr(self, 'projects_dir', None): self.projects_dir = self.srcnode.make_node('.depproj') self.projects_dir.mkdir() # bind the classes to the object, so that subclass can provide custom generators if not getattr(self, 'vsnode_vsdir', None): self.vsnode_vsdir = vsnode_vsdir if not getattr(self, 'vsnode_target', None): self.vsnode_target = vsnode_target if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = vsnode_build_all if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = vsnode_install_all if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = vsnode_project_view self.numver = self.__class__.numver self.vsver = self.__class__.vsver self.platform_toolset_ver = self.__class__.platform_toolset_ver def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) # user initialization self.init() # two phases for creating the solution self.collect_projects() # add project objects into "self.all_projects" self.write_files() # write the corresponding project and solution files def collect_projects(self): """ Fill the list self.all_projects with project objects Fill the list of build targets """ self.collect_targets() self.add_aliases() self.collect_dirs() default_project = getattr(self, 'default_project', None) def sortfun(x): if x.name == default_project: return '' return getattr(x, 'path', None) and x.path.win32path() or x.name self.all_projects.sort(key=sortfun) def write_files(self): """ Write the project and solution files from the data collected so far. 
It is unlikely that you will want to change this """ for p in self.all_projects: p.write() # and finally write the solution file node = self.get_solution_node() node.parent.mkdir() Logs.warn('Creating %r', node) template1 = compile_template(SOLUTION_TEMPLATE) sln_str = template1(self) sln_str = rm_blank_lines(sln_str) node.stealth_write(sln_str) def get_solution_node(self): """ The solution filename is required when writing the .vcproj files return self.solution_node and if it does not exist, make one """ try: return self.solution_node except AttributeError: pass solution_name = getattr(self, 'solution_name', None) if not solution_name: solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.sln' if os.path.isabs(solution_name): self.solution_node = self.root.make_node(solution_name) else: self.solution_node = self.srcnode.make_node(solution_name) return self.solution_node def project_configurations(self): """ Helper that returns all the pairs (config,platform) """ ret = [] for c in self.configurations: for p in self.platforms: ret.append((c, p)) return ret def collect_targets(self): """ Process the list of task generators """ for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue if not hasattr(tg, 'msvs_includes'): tg.msvs_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) tg.post() if not getattr(tg, 'link_task', None): continue p = self.vsnode_target(self, tg) p.collect_source() # delegate this processing p.collect_properties() self.all_projects.append(p) def add_aliases(self): """ Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 We also add an alias for "make install" (disabled by default) """ base = getattr(self, 'projects_dir', None) or self.tg.path node_project = base.make_node('build_all_projects' + self.project_extension) # Node p_build = self.vsnode_build_all(self, node_project) p_build.collect_properties() self.all_projects.append(p_build) node_project = base.make_node('install_all_projects' + self.project_extension) # Node p_install = self.vsnode_install_all(self, node_project) p_install.collect_properties() self.all_projects.append(p_install) node_project = base.make_node('project_view' + self.project_extension) # Node p_view = self.vsnode_project_view(self, node_project) p_view.collect_source() p_view.collect_properties() self.all_projects.append(p_view) n = self.vsnode_vsdir(self, make_uuid(self.srcnode.win32path() + 'build_aliases'), "build_aliases") p_build.parent = p_install.parent = p_view.parent = n self.all_projects.append(n) def collect_dirs(self): """ Create the folder structure in the Visual studio project view """ seen = {} def make_parents(proj): # look at a project, try to make a parent if getattr(proj, 'parent', None): # aliases already have parents return x = proj.iter_path if x in seen: proj.parent = seen[x] return # There is not vsnode_vsdir for x. 
# So create a project representing the folder "x" n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.win32path()), x.name) n.iter_path = x.parent self.all_projects.append(n) # recurse up to the project directory if x.height() > self.srcnode.height() + 1: make_parents(n) for p in self.all_projects[:]: # iterate over a copy of all projects if not getattr(p, 'tg', None): # but only projects that have a task generator continue # make a folder for each task generator p.iter_path = p.tg.path make_parents(p) def wrap_2008(cls): class dec(cls): def __init__(self, *k, **kw): cls.__init__(self, *k, **kw) self.project_template = PROJECT_2008_TEMPLATE def display_filter(self): root = build_property() root.subfilters = [] root.sourcefiles = [] root.source = [] root.name = '' @Utils.run_once def add_path(lst): if not lst: return root child = build_property() child.subfilters = [] child.sourcefiles = [] child.source = [] child.name = lst[-1] par = add_path(lst[:-1]) par.subfilters.append(child) return child for x in self.source: # this crap is for enabling subclasses to override get_filter_name tmp = self.get_filter_name(x.parent) tmp = tmp != '.' and tuple(tmp.split('\\')) or () par = add_path(tmp) par.source.append(x) def display(n): buf = [] for x in n.source: buf.append('\n' % (xml_escape(x.win32path()), self.get_key(x))) for x in n.subfilters: buf.append('' % xml_escape(x.name)) buf.append(display(x)) buf.append('') return '\n'.join(buf) return display(root) def get_key(self, node): """ If you do not want to let visual studio use the default file extensions, override this method to return a value: 0: C/C++ Code, 1: C++ Class, 2: C++ Header File, 3: C++ Form, 4: C++ Control, 5: Text File, 6: DEF File, 7: IDL File, 8: Makefile, 9: RGS File, 10: RC File, 11: RES File, 12: XSD File, 13: XML File, 14: HTML File, 15: CSS File, 16: Bitmap, 17: Icon, 18: Resx File, 19: BSC File, 20: XSX File, 21: C++ Web Service, 22: ASAX File, 23: Asp Page, 24: Document, 25: Discovery File, 26: C# File, 27: eFileTypeClassDiagram, 28: MHTML Document, 29: Property Sheet, 30: Cursor, 31: Manifest, 32: eFileTypeRDLC """ return '' def write(self): Logs.debug('msvs: creating %r', self.path) template1 = compile_template(self.project_template) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) return dec class msvs_2008_generator(msvs_generator): '''generates a visual studio 2008 solution''' cmd = 'msvs2008' fun = msvs_generator.fun numver = '10.00' vsver = '2008' def init(self): if not getattr(self, 'project_extension', None): self.project_extension = '_2008.vcproj' if not getattr(self, 'solution_name', None): self.solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '_2008.sln' if not getattr(self, 'vsnode_target', None): self.vsnode_target = wrap_2008(vsnode_target) if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = wrap_2008(vsnode_build_all) if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = wrap_2008(vsnode_install_all) if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = wrap_2008(vsnode_project_view) msvs_generator.init(self) def options(ctx): """ If the msvs option is used, try to detect if the build is made from visual studio """ ctx.add_option('--execsolution', action='store', help='when building with visual studio, use a build state file') old = BuildContext.execute def override_build_state(ctx): def lock(rm, add): uns = ctx.options.execsolution.replace('.sln', rm) uns = 
ctx.root.make_node(uns) try: uns.delete() except OSError: pass uns = ctx.options.execsolution.replace('.sln', add) uns = ctx.root.make_node(uns) try: uns.write('') except EnvironmentError: pass if ctx.options.execsolution: ctx.launch_dir = Context.top_dir # force a build for the whole project (invalid cwd when called by visual studio) lock('.lastbuildstate', '.unsuccessfulbuild') old(ctx) lock('.unsuccessfulbuild', '.lastbuildstate') else: old(ctx) BuildContext.execute = override_build_state ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/netcache_client.py0000660000000000000000000002160700000000000024403 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011-2015 (ita) """ A client for the network cache (playground/netcache/). Launch the server with: ./netcache_server, then use it for the builds by adding the following: def build(bld): bld.load('netcache_client') The parameters should be present in the environment in the form: NETCACHE=host:port waf configure build Or in a more detailed way: NETCACHE_PUSH=host:port NETCACHE_PULL=host:port waf configure build where: host: host where the server resides, by default localhost port: by default push on 11001 and pull on 12001 Use the server provided in playground/netcache/Netcache.java """ import os, socket, time, atexit, sys from waflib import Task, Logs, Utils, Build, Runner from waflib.Configure import conf BUF = 8192 * 16 HEADER_SIZE = 128 MODES = ['PUSH', 'PULL', 'PUSH_PULL'] STALE_TIME = 30 # seconds GET = 'GET' PUT = 'PUT' LST = 'LST' BYE = 'BYE' all_sigs_in_cache = (0.0, []) def put_data(conn, data): if sys.hexversion > 0x3000000: data = data.encode('latin-1') cnt = 0 while cnt < len(data): sent = conn.send(data[cnt:]) if sent == 0: raise RuntimeError('connection ended') cnt += sent push_connections = Runner.Queue(0) pull_connections = Runner.Queue(0) def get_connection(push=False): # return a new connection... do not forget to release it! 
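	# Descriptive note: connections are pooled in the push_connections /
	# pull_connections queues above; callers must hand a connection back with
	# release_connection() when they are done, or use close_connection() on errors
	# so that a broken socket is never put back into the pool (see
	# can_retrieve_cache / put_files_cache further below).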
try: if push: ret = push_connections.get(block=False) else: ret = pull_connections.get(block=False) except Exception: ret = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if push: ret.connect(Task.push_addr) else: ret.connect(Task.pull_addr) return ret def release_connection(conn, msg='', push=False): if conn: if push: push_connections.put(conn) else: pull_connections.put(conn) def close_connection(conn, msg=''): if conn: data = '%s,%s' % (BYE, msg) try: put_data(conn, data.ljust(HEADER_SIZE)) except: pass try: conn.close() except: pass def close_all(): for q in (push_connections, pull_connections): while q.qsize(): conn = q.get() try: close_connection(conn) except: # ignore errors when cleaning up pass atexit.register(close_all) def read_header(conn): cnt = 0 buf = [] while cnt < HEADER_SIZE: data = conn.recv(HEADER_SIZE - cnt) if not data: #import traceback #traceback.print_stack() raise ValueError('connection ended when reading a header %r' % buf) buf.append(data) cnt += len(data) if sys.hexversion > 0x3000000: ret = ''.encode('latin-1').join(buf) ret = ret.decode('latin-1') else: ret = ''.join(buf) return ret def check_cache(conn, ssig): """ List the files on the server, this is an optimization because it assumes that concurrent builds are rare """ global all_sigs_in_cache if not STALE_TIME: return if time.time() - all_sigs_in_cache[0] > STALE_TIME: params = (LST,'') put_data(conn, ','.join(params).ljust(HEADER_SIZE)) # read what is coming back ret = read_header(conn) size = int(ret.split(',')[0]) buf = [] cnt = 0 while cnt < size: data = conn.recv(min(BUF, size-cnt)) if not data: raise ValueError('connection ended %r %r' % (cnt, size)) buf.append(data) cnt += len(data) if sys.hexversion > 0x3000000: ret = ''.encode('latin-1').join(buf) ret = ret.decode('latin-1') else: ret = ''.join(buf) all_sigs_in_cache = (time.time(), ret.splitlines()) Logs.debug('netcache: server cache has %r entries', len(all_sigs_in_cache[1])) if not ssig in all_sigs_in_cache[1]: raise ValueError('no file %s in cache' % ssig) class MissingFile(Exception): pass def recv_file(conn, ssig, count, p): check_cache(conn, ssig) params = (GET, ssig, str(count)) put_data(conn, ','.join(params).ljust(HEADER_SIZE)) data = read_header(conn) size = int(data.split(',')[0]) if size == -1: raise MissingFile('no file %s - %s in cache' % (ssig, count)) # get the file, writing immediately # TODO a tmp file would be better f = open(p, 'wb') cnt = 0 while cnt < size: data = conn.recv(min(BUF, size-cnt)) if not data: raise ValueError('connection ended %r %r' % (cnt, size)) f.write(data) cnt += len(data) f.close() def sock_send(conn, ssig, cnt, p): #print "pushing %r %r %r" % (ssig, cnt, p) size = os.stat(p).st_size params = (PUT, ssig, str(cnt), str(size)) put_data(conn, ','.join(params).ljust(HEADER_SIZE)) f = open(p, 'rb') cnt = 0 while cnt < size: r = f.read(min(BUF, size-cnt)) while r: k = conn.send(r) if not k: raise ValueError('connection ended') cnt += k r = r[k:] def can_retrieve_cache(self): if not Task.pull_addr: return False if not self.outputs: return False self.cached = False cnt = 0 sig = self.signature() ssig = Utils.to_hex(self.uid() + sig) conn = None err = False try: try: conn = get_connection() for node in self.outputs: p = node.abspath() recv_file(conn, ssig, cnt, p) cnt += 1 except MissingFile as e: Logs.debug('netcache: file is not in the cache %r', e) err = True except Exception as e: Logs.debug('netcache: could not get the files %r', self.outputs) if Logs.verbose > 1: Logs.debug('netcache: exception %r', e) 
err = True # broken connection? remove this one close_connection(conn) conn = None else: Logs.debug('netcache: obtained %r from cache', self.outputs) finally: release_connection(conn) if err: return False self.cached = True return True @Utils.run_once def put_files_cache(self): if not Task.push_addr: return if not self.outputs: return if getattr(self, 'cached', None): return #print "called put_files_cache", id(self) bld = self.generator.bld sig = self.signature() ssig = Utils.to_hex(self.uid() + sig) conn = None cnt = 0 try: for node in self.outputs: # We could re-create the signature of the task with the signature of the outputs # in practice, this means hashing the output files # this is unnecessary try: if not conn: conn = get_connection(push=True) sock_send(conn, ssig, cnt, node.abspath()) Logs.debug('netcache: sent %r', node) except Exception as e: Logs.debug('netcache: could not push the files %r', e) # broken connection? remove this one close_connection(conn) conn = None cnt += 1 finally: release_connection(conn, push=True) bld.task_sigs[self.uid()] = self.cache_sig def hash_env_vars(self, env, vars_lst): # reimplement so that the resulting hash does not depend on local paths if not env.table: env = env.parent if not env: return Utils.SIG_NIL idx = str(id(env)) + str(vars_lst) try: cache = self.cache_env except AttributeError: cache = self.cache_env = {} else: try: return self.cache_env[idx] except KeyError: pass v = str([env[a] for a in vars_lst]) v = v.replace(self.srcnode.abspath().__repr__()[:-1], '') m = Utils.md5() m.update(v.encode()) ret = m.digest() Logs.debug('envhash: %r %r', ret, v) cache[idx] = ret return ret def uid(self): # reimplement so that the signature does not depend on local paths try: return self.uid_ except AttributeError: m = Utils.md5() src = self.generator.bld.srcnode up = m.update up(self.__class__.__name__.encode()) for x in self.inputs + self.outputs: up(x.path_from(src).encode()) self.uid_ = m.digest() return self.uid_ def make_cached(cls): if getattr(cls, 'nocache', None): return m1 = cls.run def run(self): if getattr(self, 'nocache', False): return m1(self) if self.can_retrieve_cache(): return 0 return m1(self) cls.run = run m2 = cls.post_run def post_run(self): if getattr(self, 'nocache', False): return m2(self) bld = self.generator.bld ret = m2(self) if bld.cache_global: self.put_files_cache() if hasattr(self, 'chmod'): for node in self.outputs: os.chmod(node.abspath(), self.chmod) return ret cls.post_run = post_run @conf def setup_netcache(ctx, push_addr, pull_addr): Task.Task.can_retrieve_cache = can_retrieve_cache Task.Task.put_files_cache = put_files_cache Task.Task.uid = uid Task.push_addr = push_addr Task.pull_addr = pull_addr Build.BuildContext.hash_env_vars = hash_env_vars ctx.cache_global = True for x in Task.classes.values(): make_cached(x) def build(bld): if not 'NETCACHE' in os.environ and not 'NETCACHE_PULL' in os.environ and not 'NETCACHE_PUSH' in os.environ: Logs.warn('Setting NETCACHE_PULL=127.0.0.1:11001 and NETCACHE_PUSH=127.0.0.1:12001') os.environ['NETCACHE_PULL'] = '127.0.0.1:12001' os.environ['NETCACHE_PUSH'] = '127.0.0.1:11001' if 'NETCACHE' in os.environ: if not 'NETCACHE_PUSH' in os.environ: os.environ['NETCACHE_PUSH'] = os.environ['NETCACHE'] if not 'NETCACHE_PULL' in os.environ: os.environ['NETCACHE_PULL'] = os.environ['NETCACHE'] v = os.environ['NETCACHE_PULL'] if v: h, p = v.split(':') pull_addr = (h, int(p)) else: pull_addr = None v = os.environ['NETCACHE_PUSH'] if v: h, p = v.split(':') push_addr = (h, int(p)) else: 
		push_addr = None
	setup_netcache(bld, push_addr, pull_addr)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615842.2574158 tevent-0.11.0/third_party/waf/waflib/extras/objcopy.py0000660000000000000000000000352000000000000022732 0ustar00rootroot00000000000000
#!/usr/bin/python
# Grygoriy Fuchedzhy 2010

"""
Support for converting linked targets to ihex, srec or binary files using objcopy.
Use the 'objcopy' feature in conjunction with the 'cc' or 'cxx' feature.
The 'objcopy' feature uses the following attributes:

objcopy_bfdname		Target object format name (e.g. ihex, srec, binary). Defaults to ihex.
objcopy_target		File name used for objcopy output. This defaults to the target name with objcopy_bfdname as extension.
objcopy_install_path	Install path for objcopy_target file. Defaults to ${PREFIX}/firmware.
objcopy_flags		Additional flags passed to objcopy.
"""

from waflib.Utils import def_attrs
from waflib import Task, Options
from waflib.TaskGen import feature, after_method

class objcopy(Task.Task):
	run_str = '${OBJCOPY} -O ${TARGET_BFDNAME} ${OBJCOPYFLAGS} ${SRC} ${TGT}'
	color = 'CYAN'

@feature('objcopy')
@after_method('apply_link')
def map_objcopy(self):
	def_attrs(self,
		objcopy_bfdname = 'ihex',
		objcopy_target = None,
		objcopy_install_path = "${PREFIX}/firmware",
		objcopy_flags = '')

	link_output = self.link_task.outputs[0]
	if not self.objcopy_target:
		self.objcopy_target = link_output.change_ext('.' + self.objcopy_bfdname).name
	task = self.create_task('objcopy', src=link_output, tgt=self.path.find_or_declare(self.objcopy_target))

	task.env.append_unique('TARGET_BFDNAME', self.objcopy_bfdname)
	try:
		task.env.append_unique('OBJCOPYFLAGS', getattr(self, 'objcopy_flags'))
	except AttributeError:
		pass

	if self.objcopy_install_path:
		self.add_install_files(install_to=self.objcopy_install_path, install_from=task.outputs[0])

def configure(ctx):
	program_name = 'objcopy'
	prefix = getattr(Options.options, 'cross_prefix', None)
	if prefix:
		program_name = '{}-{}'.format(prefix, program_name)
	ctx.find_program(program_name, var='OBJCOPY', mandatory=True)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/ocaml.py0000660000000000000000000002247000000000000022365 0ustar00rootroot00000000000000
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)

"ocaml support"

import os, re
from waflib import Utils, Task
from waflib.Logs import error
from waflib.TaskGen import feature, before_method, after_method, extension

EXT_MLL = ['.mll']
EXT_MLY = ['.mly']
EXT_MLI = ['.mli']
EXT_MLC = ['.c']
EXT_ML = ['.ml']

open_re = re.compile(r'^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M)
def filter_comments(txt):
	meh = [0]
	def repl(m):
		if m.group(1):
			meh[0] += 1
		elif m.group(2):
			meh[0] -= 1
		elif not meh[0]:
			return m.group()
		return ''
	return foo.sub(repl, txt)

def scan(self):
	node = self.inputs[0]
	code = filter_comments(node.read())

	global open_re
	names = []
	import_iterator = open_re.finditer(code)
	if import_iterator:
		for import_match in import_iterator:
			names.append(import_match.group(1))
	found_lst = []
	raw_lst = []
	for name in names:
		nd = None
		for x in self.incpaths:
			nd = x.find_resource(name.lower()+'.ml')
			if not nd:
				nd = x.find_resource(name+'.ml')
			if nd:
				found_lst.append(nd)
				break
		else:
			raw_lst.append(name)

	return (found_lst, raw_lst)

native_lst=['native', 'all', 'c_object']
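# Illustrative sketch (hypothetical helper, not used by this tool): how the scanner
# above resolves OCaml dependencies. filter_comments() strips (* ... *) comments,
# open_re then collects the opened module names, and scan() maps each name onto a
# .ml file found in the task's include paths.
def _example_list_opens(txt):
	"""Demo helper: return the module names opened by an OCaml source fragment."""
	return [m.group(1) for m in open_re.finditer(filter_comments(txt))]
# e.g. _example_list_opens('(* open Ignored *)\nopen Printf;;\nlet x = 1') == ['Printf']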
bytecode_lst=['bytecode', 'all'] @feature('ocaml') def init_ml(self): Utils.def_attrs(self, type = 'all', incpaths_lst = [], bld_incpaths_lst = [], mlltasks = [], mlytasks = [], mlitasks = [], native_tasks = [], bytecode_tasks = [], linktasks = [], bytecode_env = None, native_env = None, compiled_tasks = [], includes = '', uselib = '', are_deps_set = 0) @feature('ocaml') @after_method('init_ml') def init_envs_ml(self): self.islibrary = getattr(self, 'islibrary', False) global native_lst, bytecode_lst self.native_env = None if self.type in native_lst: self.native_env = self.env.derive() if self.islibrary: self.native_env['OCALINKFLAGS'] = '-a' self.bytecode_env = None if self.type in bytecode_lst: self.bytecode_env = self.env.derive() if self.islibrary: self.bytecode_env['OCALINKFLAGS'] = '-a' if self.type == 'c_object': self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj') @feature('ocaml') @before_method('apply_vars_ml') @after_method('init_envs_ml') def apply_incpaths_ml(self): inc_lst = self.includes.split() lst = self.incpaths_lst for dir in inc_lst: node = self.path.find_dir(dir) if not node: error("node not found: " + str(dir)) continue if not node in lst: lst.append(node) self.bld_incpaths_lst.append(node) # now the nodes are added to self.incpaths_lst @feature('ocaml') @before_method('process_source') def apply_vars_ml(self): for i in self.incpaths_lst: if self.bytecode_env: app = self.bytecode_env.append_value app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()]) if self.native_env: app = self.native_env.append_value app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()]) varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT'] for name in self.uselib.split(): for vname in varnames: cnt = self.env[vname+'_'+name] if cnt: if self.bytecode_env: self.bytecode_env.append_value(vname, cnt) if self.native_env: self.native_env.append_value(vname, cnt) @feature('ocaml') @after_method('process_source') def apply_link_ml(self): if self.bytecode_env: ext = self.islibrary and '.cma' or '.run' linktask = self.create_task('ocalink') linktask.bytecode = 1 linktask.set_outputs(self.path.find_or_declare(self.target + ext)) linktask.env = self.bytecode_env self.linktasks.append(linktask) if self.native_env: if self.type == 'c_object': ext = '.o' elif self.islibrary: ext = '.cmxa' else: ext = '' linktask = self.create_task('ocalinkx') linktask.set_outputs(self.path.find_or_declare(self.target + ext)) linktask.env = self.native_env self.linktasks.append(linktask) # we produce a .o file to be used by gcc self.compiled_tasks.append(linktask) @extension(*EXT_MLL) def mll_hook(self, node): mll_task = self.create_task('ocamllex', node, node.change_ext('.ml')) mll_task.env = self.native_env.derive() self.mlltasks.append(mll_task) self.source.append(mll_task.outputs[0]) @extension(*EXT_MLY) def mly_hook(self, node): mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')]) mly_task.env = self.native_env.derive() self.mlytasks.append(mly_task) self.source.append(mly_task.outputs[0]) task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi')) task.env = self.native_env.derive() @extension(*EXT_MLI) def mli_hook(self, node): task = self.create_task('ocamlcmi', node, node.change_ext('.cmi')) task.env = self.native_env.derive() self.mlitasks.append(task) @extension(*EXT_MLC) def mlc_hook(self, node): task = self.create_task('ocamlcc', node, node.change_ext('.o')) task.env = self.native_env.derive() 
self.compiled_tasks.append(task) @extension(*EXT_ML) def ml_hook(self, node): if self.native_env: task = self.create_task('ocamlx', node, node.change_ext('.cmx')) task.env = self.native_env.derive() task.incpaths = self.bld_incpaths_lst self.native_tasks.append(task) if self.bytecode_env: task = self.create_task('ocaml', node, node.change_ext('.cmo')) task.env = self.bytecode_env.derive() task.bytecode = 1 task.incpaths = self.bld_incpaths_lst self.bytecode_tasks.append(task) def compile_may_start(self): if not getattr(self, 'flag_deps', ''): self.flag_deps = 1 # the evil part is that we can only compute the dependencies after the # source files can be read (this means actually producing the source files) if getattr(self, 'bytecode', ''): alltasks = self.generator.bytecode_tasks else: alltasks = self.generator.native_tasks self.signature() # ensure that files are scanned - unfortunately tree = self.generator.bld for node in self.inputs: lst = tree.node_deps[self.uid()] for depnode in lst: for t in alltasks: if t == self: continue if depnode in t.inputs: self.set_run_after(t) # TODO necessary to get the signature right - for now delattr(self, 'cache_sig') self.signature() return Task.Task.runnable_status(self) class ocamlx(Task.Task): """native caml compilation""" color = 'GREEN' run_str = '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}' scan = scan runnable_status = compile_may_start class ocaml(Task.Task): """bytecode caml compilation""" color = 'GREEN' run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}' scan = scan runnable_status = compile_may_start class ocamlcmi(Task.Task): """interface generator (the .i files?)""" color = 'BLUE' run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLINCLUDES} -o ${TGT} -c ${SRC}' before = ['ocamlcc', 'ocaml', 'ocamlcc'] class ocamlcc(Task.Task): """ocaml to c interfaces""" color = 'GREEN' run_str = 'cd ${TGT[0].bld_dir()} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${OCAMLINCLUDES} -c ${SRC[0].abspath()}' class ocamllex(Task.Task): """lexical generator""" color = 'BLUE' run_str = '${OCAMLLEX} ${SRC} -o ${TGT}' before = ['ocamlcmi', 'ocaml', 'ocamlcc'] class ocamlyacc(Task.Task): """parser generator""" color = 'BLUE' run_str = '${OCAMLYACC} -b ${tsk.base()} ${SRC}' before = ['ocamlcmi', 'ocaml', 'ocamlcc'] def base(self): node = self.outputs[0] s = os.path.splitext(node.name)[0] return node.bld_dir() + os.sep + s def link_may_start(self): if getattr(self, 'bytecode', 0): alltasks = self.generator.bytecode_tasks else: alltasks = self.generator.native_tasks for x in alltasks: if not x.hasrun: return Task.ASK_LATER if not getattr(self, 'order', ''): # now reorder the inputs given the task dependencies # this part is difficult, we do not have a total order on the tasks # if the dependencies are wrong, this may not stop seen = [] pendant = []+alltasks while pendant: task = pendant.pop(0) if task in seen: continue for x in task.run_after: if not x in seen: pendant.append(task) break else: seen.append(task) self.inputs = [x.outputs[0] for x in seen] self.order = 1 return Task.Task.runnable_status(self) class ocalink(Task.Task): """bytecode caml link""" color = 'YELLOW' run_str = '${OCAMLC} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS} ${SRC}' runnable_status = link_may_start after = ['ocaml', 'ocamlcc'] class ocalinkx(Task.Task): """native caml link""" color = 'YELLOW' run_str = '${OCAMLOPT} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS_OPT} ${SRC}' runnable_status = link_may_start after = ['ocamlx', 'ocamlcc'] def 
configure(conf): opt = conf.find_program('ocamlopt', var='OCAMLOPT', mandatory=False) occ = conf.find_program('ocamlc', var='OCAMLC', mandatory=False) if (not opt) or (not occ): conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH') v = conf.env v['OCAMLC'] = occ v['OCAMLOPT'] = opt v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX', mandatory=False) v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC', mandatory=False) v['OCAMLFLAGS'] = '' where = conf.cmd_and_log(conf.env.OCAMLC + ['-where']).strip()+os.sep v['OCAMLLIB'] = where v['LIBPATH_OCAML'] = where v['INCLUDES_OCAML'] = where v['LIB_OCAML'] = 'camlrun' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/package.py0000660000000000000000000000306600000000000022665 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 """ Obtain packages, unpack them in a location, and add associated uselib variables (CFLAGS_pkgname, LIBPATH_pkgname, etc). The default is use a Dependencies.txt file in the source directory. This is a work in progress. Usage: def options(opt): opt.load('package') def configure(conf): conf.load_packages() """ from waflib import Logs from waflib.Configure import conf try: from urllib import request except ImportError: from urllib import urlopen else: urlopen = request.urlopen CACHEVAR = 'WAFCACHE_PACKAGE' @conf def get_package_cache_dir(self): cache = None if CACHEVAR in conf.environ: cache = conf.environ[CACHEVAR] cache = self.root.make_node(cache) elif self.env[CACHEVAR]: cache = self.env[CACHEVAR] cache = self.root.make_node(cache) else: cache = self.srcnode.make_node('.wafcache_package') cache.mkdir() return cache @conf def download_archive(self, src, dst): for x in self.env.PACKAGE_REPO: url = '/'.join((x, src)) try: web = urlopen(url) try: if web.getcode() != 200: continue except AttributeError: pass except Exception: # on python3 urlopen throws an exception # python 2.3 does not have getcode and throws an exception to fail continue else: tmp = self.root.make_node(dst) tmp.write(web.read()) Logs.warn('Downloaded %s from %s', tmp.abspath(), url) break else: self.fatal('Could not get the package %s' % src) @conf def load_packages(self): self.get_package_cache_dir() # read the dependencies, get the archives, .. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/parallel_debug.py0000660000000000000000000002744300000000000024241 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2007-2010 (ita) """ Debugging helper for parallel compilation. Copy it to your project and load it with:: def options(opt): opt.load('parallel_debug', tooldir='.') def build(bld): ... The build will then output a file named pdebug.svg in the source directory. 
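For example, a run that tunes the generated diagram could look like this
(hypothetical values; the --d* options are registered by this tool's options()):

	waf clean build -j4 --dtitle='my build' --dwidth=1000 --dband=22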
""" import re, sys, threading, time, traceback try: from Queue import Queue except: from queue import Queue from waflib import Runner, Options, Task, Logs, Errors SVG_TEMPLATE = """ ${if project.title} ${project.title} ${endif} ${for cls in project.groups} ${for rect in cls.rects} ${endfor} ${endfor} ${for info in project.infos} ${info.text} ${endfor} ${if project.tooltip} ${endif} """ COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") %s return ''.join(lst) ''' reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P[^}]*?)\})", re.M) def compile_template(line): extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith(('if', 'for')): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith(('endif', 'endfor')): indent -= 1 elif f.startswith(('else', 'elif')): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(str(%s))' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) # uncomment the following to debug the template #for i, x in enumerate(fun.splitlines()): # print i, x return Task.funex(fun) # red #ff4d4d # green #4da74d # lila #a751ff color2code = { 'GREEN' : '#4da74d', 'YELLOW' : '#fefe44', 'PINK' : '#a751ff', 'RED' : '#cc1d1d', 'BLUE' : '#6687bb', 'CYAN' : '#34e2e2', } mp = {} info = [] # list of (text,color) def map_to_color(name): if name in mp: return mp[name] try: cls = Task.classes[name] except KeyError: return color2code['RED'] if cls.color in mp: return mp[cls.color] if cls.color in color2code: return color2code[cls.color] return color2code['RED'] def process(self): m = self.generator.bld.producer try: # TODO another place for this? 
del self.generator.bld.task_sigs[self.uid()] except KeyError: pass self.generator.bld.producer.set_running(1, self) try: ret = self.run() except Exception: self.err_msg = traceback.format_exc() self.hasrun = Task.EXCEPTION # TODO cleanup m.error_handler(self) return if ret: self.err_code = ret self.hasrun = Task.CRASHED else: try: self.post_run() except Errors.WafError: pass except Exception: self.err_msg = traceback.format_exc() self.hasrun = Task.EXCEPTION else: self.hasrun = Task.SUCCESS if self.hasrun != Task.SUCCESS: m.error_handler(self) self.generator.bld.producer.set_running(-1, self) Task.Task.process_back = Task.Task.process Task.Task.process = process old_start = Runner.Parallel.start def do_start(self): try: Options.options.dband except AttributeError: self.bld.fatal('use def options(opt): opt.load("parallel_debug")!') self.taskinfo = Queue() old_start(self) if self.dirty: make_picture(self) Runner.Parallel.start = do_start lock_running = threading.Lock() def set_running(self, by, tsk): with lock_running: try: cache = self.lock_cache except AttributeError: cache = self.lock_cache = {} i = 0 if by > 0: vals = cache.values() for i in range(self.numjobs): if i not in vals: cache[tsk] = i break else: i = cache[tsk] del cache[tsk] self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by, ",".join(map(str, tsk.outputs))) ) Runner.Parallel.set_running = set_running def name2class(name): return name.replace(' ', '_').replace('.', '_') def make_picture(producer): # first, cast the parameters if not hasattr(producer.bld, 'path'): return tmp = [] try: while True: tup = producer.taskinfo.get(False) tmp.append(list(tup)) except: pass try: ini = float(tmp[0][2]) except: return if not info: seen = [] for x in tmp: name = x[3] if not name in seen: seen.append(name) else: continue info.append((name, map_to_color(name))) info.sort(key=lambda x: x[0]) thread_count = 0 acc = [] for x in tmp: thread_count += x[6] acc.append("%d %d %f %r %d %d %d %s" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count, x[7])) data_node = producer.bld.path.make_node('pdebug.dat') data_node.write('\n'.join(acc)) tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp] st = {} for l in tmp: if not l[0] in st: st[l[0]] = len(st.keys()) tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ] THREAD_AMOUNT = len(st.keys()) st = {} for l in tmp: if not l[1] in st: st[l[1]] = len(st.keys()) tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ] BAND = Options.options.dband seen = {} acc = [] for x in range(len(tmp)): line = tmp[x] id = line[1] if id in seen: continue seen[id] = True begin = line[2] thread_id = line[0] for y in range(x + 1, len(tmp)): line = tmp[y] if line[1] == id: end = line[2] #print id, thread_id, begin, end #acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) ) acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3], line[7]) ) break if Options.options.dmaxtime < 0.1: gwidth = 1 for x in tmp: m = BAND * x[2] if m > gwidth: gwidth = m else: gwidth = BAND * Options.options.dmaxtime ratio = float(Options.options.dwidth) / gwidth gwidth = Options.options.dwidth gheight = BAND * (THREAD_AMOUNT + len(info) + 1.5) # simple data model for our template class tobject(object): pass model = tobject() model.x = 0 model.y = 0 model.width = gwidth + 4 model.height = gheight + 4 model.tooltip = not Options.options.dnotooltip model.title = Options.options.dtitle model.title_x = gwidth / 2 model.title_y = gheight + - 5 groups = {} 
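	# Descriptive note: each entry of 'acc' is (x, y, width, height, class_name,
	# outputs); the loop below buckets these rectangles by task class so that the
	# SVG template can emit one group per class, which keeps the JavaScript
	# highlighting in the generated diagram fast.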
for (x, y, w, h, clsname, name) in acc: try: groups[clsname].append((x, y, w, h, name)) except: groups[clsname] = [(x, y, w, h, name)] # groups of rectangles (else js highlighting is slow) model.groups = [] for cls in groups: g = tobject() model.groups.append(g) g.classname = name2class(cls) g.rects = [] for (x, y, w, h, name) in groups[cls]: r = tobject() g.rects.append(r) r.x = 2 + x * ratio r.y = 2 + y r.width = w * ratio r.height = h r.name = name r.color = map_to_color(cls) cnt = THREAD_AMOUNT # caption model.infos = [] for (text, color) in info: inf = tobject() model.infos.append(inf) inf.classname = name2class(text) inf.x = 2 + BAND inf.y = 5 + (cnt + 0.5) * BAND inf.width = BAND/2 inf.height = BAND/2 inf.color = color inf.text = text inf.text_x = 2 + 2 * BAND inf.text_y = 5 + (cnt + 0.5) * BAND + 10 cnt += 1 # write the file... template1 = compile_template(SVG_TEMPLATE) txt = template1(model) node = producer.bld.path.make_node('pdebug.svg') node.write(txt) Logs.warn('Created the diagram %r', node) def options(opt): opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv), help='title for the svg diagram', dest='dtitle') opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=800, dest='dwidth') opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime') opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband') opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime') opt.add_option('--dnotooltip', action='store_true', help='disable tooltips', default=False, dest='dnotooltip') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/pch.py0000660000000000000000000001044000000000000022036 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Alexander Afanasyev (UCLA), 2014 """ Enable precompiled C++ header support (currently only clang++ and g++ are supported) To use this tool, wscript should look like: def options(opt): opt.load('pch') # This will add `--with-pch` configure option. # Unless --with-pch during configure stage specified, the precompiled header support is disabled def configure(conf): conf.load('pch') # this will set conf.env.WITH_PCH if --with-pch is specified and the supported compiler is used # Unless conf.env.WITH_PCH is set, the precompiled header support is disabled def build(bld): bld(features='cxx pch', target='precompiled-headers', name='precompiled-headers', headers='a.h b.h c.h', # headers to pre-compile into `precompiled-headers` # Other parameters to compile precompiled headers # includes=..., # export_includes=..., # use=..., # ... # Exported parameters will be propagated even if precompiled headers are disabled ) bld( target='test', features='cxx cxxprogram', source='a.cpp b.cpp d.cpp main.cpp', use='precompiled-headers', ) # or bld( target='test', features='pch cxx cxxprogram', source='a.cpp b.cpp d.cpp main.cpp', headers='a.h b.h c.h', ) Note that precompiled header must have multiple inclusion guards. If the guards are missing, any benefit of precompiled header will be voided and compilation may fail in some cases. 
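In practice this means every header listed in `headers` should either use
'#pragma once' or a conventional '#ifndef FOO_H / #define FOO_H ... #endif'
guard (FOO_H being a placeholder name); both g++ and clang++, the compilers
supported here, accept either form.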
""" import os from waflib import Task, TaskGen, Utils from waflib.Tools import c_preproc, cxx PCH_COMPILER_OPTIONS = { 'clang++': [['-include'], '.pch', ['-x', 'c++-header']], 'g++': [['-include'], '.gch', ['-x', 'c++-header']], } def options(opt): opt.add_option('--without-pch', action='store_false', default=True, dest='with_pch', help='''Try to use precompiled header to speed up compilation (only g++ and clang++)''') def configure(conf): if (conf.options.with_pch and conf.env['COMPILER_CXX'] in PCH_COMPILER_OPTIONS.keys()): conf.env.WITH_PCH = True flags = PCH_COMPILER_OPTIONS[conf.env['COMPILER_CXX']] conf.env.CXXPCH_F = flags[0] conf.env.CXXPCH_EXT = flags[1] conf.env.CXXPCH_FLAGS = flags[2] @TaskGen.feature('pch') @TaskGen.before('process_source') def apply_pch(self): if not self.env.WITH_PCH: return if getattr(self.bld, 'pch_tasks', None) is None: self.bld.pch_tasks = {} if getattr(self, 'headers', None) is None: return self.headers = self.to_nodes(self.headers) if getattr(self, 'name', None): try: task = self.bld.pch_tasks[self.name] self.bld.fatal("Duplicated 'pch' task with name %r" % "%s.%s" % (self.name, self.idx)) except KeyError: pass out = '%s.%d%s' % (self.target, self.idx, self.env['CXXPCH_EXT']) out = self.path.find_or_declare(out) task = self.create_task('gchx', self.headers, out) # target should be an absolute path of `out`, but without precompiled header extension task.target = out.abspath()[:-len(out.suffix())] self.pch_task = task if getattr(self, 'name', None): self.bld.pch_tasks[self.name] = task @TaskGen.feature('cxx') @TaskGen.after_method('process_source', 'propagate_uselib_vars') def add_pch(self): if not (self.env['WITH_PCH'] and getattr(self, 'use', None) and getattr(self, 'compiled_tasks', None) and getattr(self.bld, 'pch_tasks', None)): return pch = None # find pch task, if any if getattr(self, 'pch_task', None): pch = self.pch_task else: for use in Utils.to_list(self.use): try: pch = self.bld.pch_tasks[use] except KeyError: pass if pch: for x in self.compiled_tasks: x.env.append_value('CXXFLAGS', self.env['CXXPCH_F'] + [pch.target]) class gchx(Task.Task): run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CXXPCH_FLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXXPCH_F:SRC} ${CXX_SRC_F}${SRC[0].abspath()} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' scan = c_preproc.scan color = 'BLUE' ext_out=['.h'] def runnable_status(self): try: node_deps = self.generator.bld.node_deps[self.uid()] except KeyError: node_deps = [] ret = Task.Task.runnable_status(self) if ret == Task.SKIP_ME and self.env.CXX_NAME == 'clang': t = os.stat(self.outputs[0].abspath()).st_mtime for n in self.inputs + node_deps: if os.stat(n.abspath()).st_mtime > t: return Task.RUN_ME return ret ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/pep8.py0000660000000000000000000000662400000000000022151 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # # written by Sylvain Rouquette, 2011 ''' Install pep8 module: $ easy_install pep8 or $ pip install pep8 To add the pep8 tool to the waf file: $ ./waf-light --tools=compat15,pep8 or, if you have waf >= 1.6.2 $ ./waf update --files=pep8 Then add this to your wscript: [at]extension('.py', 'wscript') def run_pep8(self, node): self.create_task('Pep8', node) ''' import threading from waflib import Task, Options pep8 = __import__('pep8') class Pep8(Task.Task): color = 'PINK' lock = threading.Lock() def check_options(self): if pep8.options: return pep8.options = Options.options pep8.options.prog = 'pep8' excl = pep8.options.exclude.split(',') pep8.options.exclude = [s.rstrip('/') for s in excl] if pep8.options.filename: pep8.options.filename = pep8.options.filename.split(',') if pep8.options.select: pep8.options.select = pep8.options.select.split(',') else: pep8.options.select = [] if pep8.options.ignore: pep8.options.ignore = pep8.options.ignore.split(',') elif pep8.options.select: # Ignore all checks which are not explicitly selected pep8.options.ignore = [''] elif pep8.options.testsuite or pep8.options.doctest: # For doctest and testsuite, all checks are required pep8.options.ignore = [] else: # The default choice: ignore controversial checks pep8.options.ignore = pep8.DEFAULT_IGNORE.split(',') pep8.options.physical_checks = pep8.find_checks('physical_line') pep8.options.logical_checks = pep8.find_checks('logical_line') pep8.options.counters = dict.fromkeys(pep8.BENCHMARK_KEYS, 0) pep8.options.messages = {} def run(self): with Pep8.lock: self.check_options() pep8.input_file(self.inputs[0].abspath()) return 0 if not pep8.get_count() else -1 def options(opt): opt.add_option('-q', '--quiet', default=0, action='count', help="report only file names, or nothing with -qq") opt.add_option('-r', '--repeat', action='store_true', help="show all occurrences of the same error") opt.add_option('--exclude', metavar='patterns', default=pep8.DEFAULT_EXCLUDE, help="exclude files or directories which match these " "comma separated patterns (default: %s)" % pep8.DEFAULT_EXCLUDE, dest='exclude') opt.add_option('--filename', metavar='patterns', default='*.py', help="when parsing directories, only check filenames " "matching these comma separated patterns (default: " "*.py)") opt.add_option('--select', metavar='errors', default='', help="select errors and warnings (e.g. E,W6)") opt.add_option('--ignore', metavar='errors', default='', help="skip errors and warnings (e.g. 
E4,W)") opt.add_option('--show-source', action='store_true', help="show source code for each error") opt.add_option('--show-pep8', action='store_true', help="show text of PEP 8 for each error") opt.add_option('--statistics', action='store_true', help="count errors and warnings") opt.add_option('--count', action='store_true', help="print total number of errors and warnings " "to standard error and set exit code to 1 if " "total is not null") opt.add_option('--benchmark', action='store_true', help="measure processing speed") opt.add_option('--testsuite', metavar='dir', help="run regression tests from dir") opt.add_option('--doctest', action='store_true', help="run doctest on myself") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/pgicc.py0000660000000000000000000000331300000000000022352 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Antoine Dechaume 2011 """ Detect the PGI C compiler """ import sys, re from waflib import Errors from waflib.Configure import conf from waflib.Tools.compiler_c import c_compiler c_compiler['linux'].append('pgicc') @conf def find_pgi_compiler(conf, var, name): """ Find the program name, and execute it to ensure it really is itself. """ if sys.platform == 'cygwin': conf.fatal('The PGI compiler does not work on Cygwin') v = conf.env cc = None if v[var]: cc = v[var] elif var in conf.environ: cc = conf.environ[var] if not cc: cc = conf.find_program(name, var=var) if not cc: conf.fatal('PGI Compiler (%s) was not found' % name) v[var + '_VERSION'] = conf.get_pgi_version(cc) v[var] = cc v[var + '_NAME'] = 'pgi' @conf def get_pgi_version(conf, cc): """Find the version of a pgi compiler.""" version_re = re.compile(r"The Portland Group", re.I).search cmd = cc + ['-V', '-E'] # Issue 1078, prevent wrappers from linking try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find pgi compiler %r' % cmd) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not verify PGI signature') cmd = cc + ['-help=variable'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find pgi compiler %r' % cmd) version = re.findall(r'^COMPVER\s*=(.*)', out, re.M) if len(version) != 1: conf.fatal('Could not determine the compiler version') return version[0] def configure(conf): conf.find_pgi_compiler('CC', 'pgcc') conf.find_ar() conf.gcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/pgicxx.py0000660000000000000000000000061300000000000022567 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Antoine Dechaume 2011 """ Detect the PGI C++ compiler """ from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['linux'].append('pgicxx') from waflib.extras import pgicc def configure(conf): conf.find_pgi_compiler('CXX', 'pgCC') conf.find_ar() conf.gxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/proc.py0000660000000000000000000000327500000000000022237 0ustar00rootroot00000000000000#! 
/usr/bin/env python # per rosengren 2011 from os import environ, path from waflib import TaskGen, Utils def options(opt): grp = opt.add_option_group('Oracle ProC Options') grp.add_option('--oracle_home', action='store', default=environ.get('PROC_ORACLE'), help='Path to Oracle installation home (has bin/lib)') grp.add_option('--tns_admin', action='store', default=environ.get('TNS_ADMIN'), help='Directory containing server list (TNS_NAMES.ORA)') grp.add_option('--connection', action='store', default='dummy-user/dummy-password@dummy-server', help='Format: user/password@server') def configure(cnf): env = cnf.env if not env.PROC_ORACLE: env.PROC_ORACLE = cnf.options.oracle_home if not env.PROC_TNS_ADMIN: env.PROC_TNS_ADMIN = cnf.options.tns_admin if not env.PROC_CONNECTION: env.PROC_CONNECTION = cnf.options.connection cnf.find_program('proc', var='PROC', path_list=env.PROC_ORACLE + path.sep + 'bin') def proc(tsk): env = tsk.env gen = tsk.generator inc_nodes = gen.to_incnodes(Utils.to_list(getattr(gen,'includes',[])) + env['INCLUDES']) cmd = ( [env.PROC] + ['SQLCHECK=SEMANTICS'] + (['SYS_INCLUDE=(' + ','.join(env.PROC_INCLUDES) + ')'] if env.PROC_INCLUDES else []) + ['INCLUDE=(' + ','.join( [i.bldpath() for i in inc_nodes] ) + ')'] + ['userid=' + env.PROC_CONNECTION] + ['INAME=' + tsk.inputs[0].bldpath()] + ['ONAME=' + tsk.outputs[0].bldpath()] ) exec_env = { 'ORACLE_HOME': env.PROC_ORACLE, 'LD_LIBRARY_PATH': env.PROC_ORACLE + path.sep + 'lib', } if env.PROC_TNS_ADMIN: exec_env['TNS_ADMIN'] = env.PROC_TNS_ADMIN return tsk.exec_command(cmd, env=exec_env) TaskGen.declare_chain( name = 'proc', rule = proc, ext_in = '.pc', ext_out = '.c', ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/protoc.py0000660000000000000000000001533100000000000022576 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Philipp Bender, 2012 # Matt Clarkson, 2012 import re, os from waflib.Task import Task from waflib.TaskGen import extension from waflib import Errors, Context, Logs """ A simple tool to integrate protocol buffers into your build system. Example for C++: def configure(conf): conf.load('compiler_cxx cxx protoc') def build(bld): bld( features = 'cxx cxxprogram' source = 'main.cpp file1.proto proto/file2.proto', includes = '. proto', target = 'executable') Example for Python: def configure(conf): conf.load('python protoc') def build(bld): bld( features = 'py' source = 'main.py file1.proto proto/file2.proto', protoc_includes = 'proto') Example for both Python and C++ at same time: def configure(conf): conf.load('cxx python protoc') def build(bld): bld( features = 'cxx py' source = 'file1.proto proto/file2.proto', protoc_includes = 'proto') # or includes Example for Java: def options(opt): opt.load('java') def configure(conf): conf.load('python java protoc') # Here you have to point to your protobuf-java JAR and have it in classpath conf.env.CLASSPATH_PROTOBUF = ['protobuf-java-2.5.0.jar'] def build(bld): bld( features = 'javac protoc', name = 'pbjava', srcdir = 'inc/ src', # directories used by javac source = ['inc/message_inc.proto', 'inc/message.proto'], # source is used by protoc for .proto files use = 'PROTOBUF', protoc_includes = ['inc']) # for protoc to search dependencies Protoc includes passed via protoc_includes are either relative to the taskgen or to the project and are searched in this order. 
Include directories external to the waf project can also be passed to the extra by using protoc_extincludes protoc_extincludes = ['/usr/include/pblib'] Notes when using this tool: - protoc command line parsing is tricky. The generated files can be put in subfolders which depend on the order of the include paths. Try to be simple when creating task generators containing protoc stuff. """ class protoc(Task): run_str = '${PROTOC} ${PROTOC_FL:PROTOC_FLAGS} ${PROTOC_ST:INCPATHS} ${PROTOC_ST:PROTOC_INCPATHS} ${PROTOC_ST:PROTOC_EXTINCPATHS} ${SRC[0].bldpath()}' color = 'BLUE' ext_out = ['.h', 'pb.cc', '.py', '.java'] def scan(self): """ Scan .proto dependencies """ node = self.inputs[0] nodes = [] names = [] seen = [] search_nodes = [] if not node: return (nodes, names) if 'cxx' in self.generator.features: search_nodes = self.generator.includes_nodes if 'py' in self.generator.features or 'javac' in self.generator.features: for incpath in getattr(self.generator, 'protoc_includes', []): incpath_node = self.generator.path.find_node(incpath) if incpath_node: search_nodes.append(incpath_node) else: # Check if relative to top-level for extra tg dependencies incpath_node = self.generator.bld.path.find_node(incpath) if incpath_node: search_nodes.append(incpath_node) else: raise Errors.WafError('protoc: include path %r does not exist' % incpath) def parse_node(node): if node in seen: return seen.append(node) code = node.read().splitlines() for line in code: m = re.search(r'^import\s+"(.*)";.*(//)?.*', line) if m: dep = m.groups()[0] for incnode in search_nodes: found = incnode.find_resource(dep) if found: nodes.append(found) parse_node(found) else: names.append(dep) parse_node(node) # Add also dependencies path to INCPATHS so protoc will find the included file for deppath in nodes: self.env.append_unique('INCPATHS', deppath.parent.bldpath()) return (nodes, names) @extension('.proto') def process_protoc(self, node): incdirs = [] out_nodes = [] protoc_flags = [] # ensure PROTOC_FLAGS is a list; a copy is used below anyway self.env.PROTOC_FLAGS = self.to_list(self.env.PROTOC_FLAGS) if 'cxx' in self.features: cpp_node = node.change_ext('.pb.cc') hpp_node = node.change_ext('.pb.h') self.source.append(cpp_node) out_nodes.append(cpp_node) out_nodes.append(hpp_node) protoc_flags.append('--cpp_out=%s' % node.parent.get_bld().bldpath()) if 'py' in self.features: py_node = node.change_ext('_pb2.py') self.source.append(py_node) out_nodes.append(py_node) protoc_flags.append('--python_out=%s' % node.parent.get_bld().bldpath()) if 'javac' in self.features: # Make javac get also pick java code generated in build if not node.parent.get_bld() in self.javac_task.srcdir: self.javac_task.srcdir.append(node.parent.get_bld()) protoc_flags.append('--java_out=%s' % node.parent.get_bld().bldpath()) node.parent.get_bld().mkdir() tsk = self.create_task('protoc', node, out_nodes) tsk.env.append_value('PROTOC_FLAGS', protoc_flags) if 'javac' in self.features: self.javac_task.set_run_after(tsk) # Instruct protoc where to search for .proto included files. 
# For C++ standard include files dirs are used, # but this doesn't apply to Python for example for incpath in getattr(self, 'protoc_includes', []): incpath_node = self.path.find_node(incpath) if incpath_node: incdirs.append(incpath_node.bldpath()) else: # Check if relative to top-level for extra tg dependencies incpath_node = self.bld.path.find_node(incpath) if incpath_node: incdirs.append(incpath_node.bldpath()) else: raise Errors.WafError('protoc: include path %r does not exist' % incpath) tsk.env.PROTOC_INCPATHS = incdirs # Include paths external to the waf project (ie. shared pb repositories) tsk.env.PROTOC_EXTINCPATHS = getattr(self, 'protoc_extincludes', []) # PR2115: protoc generates output of .proto files in nested # directories by canonicalizing paths. To avoid this we have to pass # as first include the full directory file of the .proto file tsk.env.prepend_value('INCPATHS', node.parent.bldpath()) use = getattr(self, 'use', '') if not 'PROTOBUF' in use: self.use = self.to_list(use) + ['PROTOBUF'] def configure(conf): conf.check_cfg(package='protobuf', uselib_store='PROTOBUF', args=['--cflags', '--libs']) conf.find_program('protoc', var='PROTOC') conf.start_msg('Checking for protoc version') protocver = conf.cmd_and_log(conf.env.PROTOC + ['--version'], output=Context.BOTH) protocver = ''.join(protocver).strip()[protocver[0].rfind(' ')+1:] conf.end_msg(protocver) conf.env.PROTOC_MAJOR = protocver[:protocver.find('.')] conf.env.PROTOC_ST = '-I%s' conf.env.PROTOC_FL = '%s' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/pyqt5.py0000660000000000000000000001610700000000000022354 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Federico Pellegrin, 2016-2019 (fedepell) adapted for Python """ This tool helps with finding Python Qt5 tools and libraries, and provides translation from QT5 files to Python code. The following snippet illustrates the tool usage:: def options(opt): opt.load('py pyqt5') def configure(conf): conf.load('py pyqt5') def build(bld): bld( features = 'py pyqt5', source = 'main.py textures.qrc aboutDialog.ui', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "pyqt5" tool. 
Add into the sources list also the qrc resources files or ui5 definition files and they will be translated into python code with the system tools (PyQt5, PySide2, PyQt4 are searched in this order) and then compiled """ try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os from waflib.Tools import python from waflib import Task, Options from waflib.TaskGen import feature, extension from waflib.Configure import conf from waflib import Logs EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ class XMLHandler(ContentHandler): """ Parses ``.qrc`` files """ def __init__(self): self.buf = [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_pyrcc_task(self, node): "Creates rcc and py task for ``.qrc`` files" rcnode = node.change_ext('.py') self.create_task('pyrcc', node, rcnode) if getattr(self, 'install_from', None): self.install_from = self.install_from.get_bld() else: self.install_from = self.path.get_bld() self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') self.process_py(rcnode) @extension(*EXT_UI) def create_pyuic_task(self, node): "Create uic tasks and py for user interface ``.ui`` definition files" uinode = node.change_ext('.py') self.create_task('ui5py', node, uinode) if getattr(self, 'install_from', None): self.install_from = self.install_from.get_bld() else: self.install_from = self.path.get_bld() self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') self.process_py(uinode) @extension('.ts') def add_pylang(self, node): """Adds all the .ts file into ``self.lang``""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('pyqt5') def apply_pyqt5(self): """ The additional parameters are: :param lang: list of translation files (\\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm'))) if getattr(self, 'langname', None): qmnodes = [k.outputs[0] for k in qmtasks] rcnode = self.langname if isinstance(rcnode, str): rcnode = self.path.find_or_declare(rcnode + '.qrc') t = self.create_task('qm2rcc', qmnodes, rcnode) create_pyrcc_task(self, t.outputs[0]) class pyrcc(Task.Task): """ Processes ``.qrc`` files """ color = 'BLUE' run_str = '${QT_PYRCC} ${SRC} -o ${TGT}' ext_out = ['.py'] def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def scan(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') return ([], []) parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) fi = open(self.inputs[0].abspath(), 'r') try: parser.parse(fi) finally: fi.close() nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: 
names.append(x) return (nodes, names) class ui5py(Task.Task): """ Processes ``.ui`` files for python """ color = 'BLUE' run_str = '${QT_PYUIC} ${SRC} -o ${TGT}' ext_out = ['.py'] class ts2qm(Task.Task): """ Generates ``.qm`` files from ``.ts`` files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): """ Generates ``.qrc`` files from ``.qm`` files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['%s' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '\n\n%s\n\n' % txt self.outputs[0].write(code) def configure(self): self.find_pyqt5_binaries() # warn about this during the configuration too if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') @conf def find_pyqt5_binaries(self): """ Detects PyQt5 or PySide2 programs such as pyuic5/pyside2-uic, pyrcc5/pyside2-rcc """ env = self.env if getattr(Options.options, 'want_pyqt5', True): self.find_program(['pyuic5'], var='QT_PYUIC') self.find_program(['pyrcc5'], var='QT_PYRCC') self.find_program(['pylupdate5'], var='QT_PYLUPDATE') elif getattr(Options.options, 'want_pyside2', True): self.find_program(['pyside2-uic'], var='QT_PYUIC') self.find_program(['pyside2-rcc'], var='QT_PYRCC') self.find_program(['pyside2-lupdate'], var='QT_PYLUPDATE') elif getattr(Options.options, 'want_pyqt4', True): self.find_program(['pyuic4'], var='QT_PYUIC') self.find_program(['pyrcc4'], var='QT_PYRCC') self.find_program(['pylupdate4'], var='QT_PYLUPDATE') else: self.find_program(['pyuic5','pyside2-uic','pyuic4'], var='QT_PYUIC') self.find_program(['pyrcc5','pyside2-rcc','pyrcc4'], var='QT_PYRCC') self.find_program(['pylupdate5', 'pyside2-lupdate','pylupdate4'], var='QT_PYLUPDATE') if not env.QT_PYUIC: self.fatal('cannot find the uic compiler for python for qt5') if not env.QT_PYRCC: self.fatal('cannot find the rcc compiler for python for qt5') self.find_program(['lrelease-qt5', 'lrelease'], var='QT_LRELEASE') def options(opt): """ Command-line options """ pyqt5opt=opt.add_option_group("Python QT5 Options") pyqt5opt.add_option('--pyqt5-pyqt5', action='store_true', default=False, dest='want_pyqt5', help='use PyQt5 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') pyqt5opt.add_option('--pyqt5-pyside2', action='store_true', default=False, dest='want_pyside2', help='use PySide2 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') pyqt5opt.add_option('--pyqt5-pyqt4', action='store_true', default=False, dest='want_pyqt4', help='use PyQt4 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1611563707.5457177 tevent-0.11.0/third_party/waf/waflib/extras/pytest.py0000660000000000000000000002124200000000000022616 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Calle Rosenquist, 2016-2018 (xbreak) """ Provides Python unit test support using :py:class:`waflib.Tools.waf_unit_test.utest` task via the **pytest** feature. To use pytest the following is needed: 1. Load `pytest` and the dependency `waf_unit_test` tools. 2. Create a task generator with feature `pytest` (not `test`) and customize behaviour with the following attributes: - `pytest_source`: Test input files. - `ut_str`: Test runner command, e.g. 
``${PYTHON} -B -m unittest discover`` or if nose is used: ``${NOSETESTS} --no-byte-compile ${SRC}``. - `ut_shell`: Determines if ``ut_str`` is executed in a shell. Default: False. - `ut_cwd`: Working directory for test runner. Defaults to directory of first ``pytest_source`` file. Additionally the following `pytest` specific attributes are used in dependent taskgens: - `pytest_path`: Node or string list of additional Python paths. - `pytest_libpath`: Node or string list of additional library paths. The `use` dependencies are used for both update calculation and to populate the following environment variables for the `pytest` test runner: 1. `PYTHONPATH` (`sys.path`) of any dependent taskgen that has the feature `py`: - `install_from` attribute is used to determine where the root of the Python sources are located. If `install_from` is not specified the default is to use the taskgen path as the root. - `pytest_path` attribute is used to manually specify additional Python paths. 2. Dynamic linker search path variable (e.g. `LD_LIBRARY_PATH`) of any dependent taskgen with non-static link_task. - `pytest_libpath` attribute is used to manually specify additional linker paths. 3. Java class search path (CLASSPATH) of any Java/Javalike dependency Note: `pytest` cannot automatically determine the correct `PYTHONPATH` for `pyext` taskgens because the extension might be part of a Python package or used standalone: - When used as part of another `py` package, the `PYTHONPATH` is provided by that taskgen so no additional action is required. - When used as a standalone module, the user needs to specify the `PYTHONPATH` explicitly via the `pytest_path` attribute on the `pyext` taskgen. For details c.f. the pytest playground examples. For example:: # A standalone Python C extension that demonstrates unit test environment population # of PYTHONPATH and LD_LIBRARY_PATH/PATH/DYLD_LIBRARY_PATH. # # Note: `pytest_path` is provided here because pytest cannot automatically determine # if the extension is part of another Python package or is used standalone. bld(name = 'foo_ext', features = 'c cshlib pyext', source = 'src/foo_ext.c', target = 'foo_ext', pytest_path = [ bld.path.get_bld() ]) # Python package under test that also depend on the Python module `foo_ext` # # Note: `install_from` is added automatically to `PYTHONPATH`. bld(name = 'foo', features = 'py', use = 'foo_ext', source = bld.path.ant_glob('src/foo/*.py'), install_from = 'src') # Unit test example using the built in module unittest and let that discover # any test cases. bld(name = 'foo_test', features = 'pytest', use = 'foo', pytest_source = bld.path.ant_glob('test/*.py'), ut_str = '${PYTHON} -B -m unittest discover') """ import os from waflib import Task, TaskGen, Errors, Utils, Logs from waflib.Tools import ccroot def _process_use_rec(self, name): """ Recursively process ``use`` for task generator with name ``name``.. Used by pytest_process_use. """ if name in self.pytest_use_not or name in self.pytest_use_seen: return try: tg = self.bld.get_tgen_by_name(name) except Errors.WafError: self.pytest_use_not.add(name) return self.pytest_use_seen.append(name) tg.post() for n in self.to_list(getattr(tg, 'use', [])): _process_use_rec(self, n) @TaskGen.feature('pytest') @TaskGen.after_method('process_source', 'apply_link') def pytest_process_use(self): """ Process the ``use`` attribute which contains a list of task generator names and store paths that later is used to populate the unit test runtime environment. 
""" self.pytest_use_not = set() self.pytest_use_seen = [] self.pytest_paths = [] # strings or Nodes self.pytest_libpaths = [] # strings or Nodes self.pytest_javapaths = [] # strings or Nodes self.pytest_dep_nodes = [] names = self.to_list(getattr(self, 'use', [])) for name in names: _process_use_rec(self, name) def extend_unique(lst, varlst): ext = [] for x in varlst: if x not in lst: ext.append(x) lst.extend(ext) # Collect type specific info needed to construct a valid runtime environment # for the test. for name in self.pytest_use_seen: tg = self.bld.get_tgen_by_name(name) extend_unique(self.pytest_paths, Utils.to_list(getattr(tg, 'pytest_path', []))) extend_unique(self.pytest_libpaths, Utils.to_list(getattr(tg, 'pytest_libpath', []))) if 'py' in tg.features: # Python dependencies are added to PYTHONPATH pypath = getattr(tg, 'install_from', tg.path) if 'buildcopy' in tg.features: # Since buildcopy is used we assume that PYTHONPATH in build should be used, # not source extend_unique(self.pytest_paths, [pypath.get_bld().abspath()]) # Add buildcopy output nodes to dependencies extend_unique(self.pytest_dep_nodes, [o for task in getattr(tg, 'tasks', []) \ for o in getattr(task, 'outputs', [])]) else: # If buildcopy is not used, depend on sources instead extend_unique(self.pytest_dep_nodes, tg.source) extend_unique(self.pytest_paths, [pypath.abspath()]) if 'javac' in tg.features: # If a JAR is generated point to that, otherwise to directory if getattr(tg, 'jar_task', None): extend_unique(self.pytest_javapaths, [tg.jar_task.outputs[0].abspath()]) else: extend_unique(self.pytest_javapaths, [tg.path.get_bld()]) # And add respective dependencies if present if tg.use_lst: extend_unique(self.pytest_javapaths, tg.use_lst) if getattr(tg, 'link_task', None): # For tasks with a link_task (C, C++, D et.c.) include their library paths: if not isinstance(tg.link_task, ccroot.stlink_task): extend_unique(self.pytest_dep_nodes, tg.link_task.outputs) extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH) if 'pyext' in tg.features: # If the taskgen is extending Python we also want to add the interpreter libpath. extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH_PYEXT) else: # Only add to libpath if the link task is not a Python extension extend_unique(self.pytest_libpaths, [tg.link_task.outputs[0].parent.abspath()]) @TaskGen.feature('pytest') @TaskGen.after_method('pytest_process_use') def make_pytest(self): """ Creates a ``utest`` task with a populated environment for Python if not specified in ``ut_env``: - Paths in `pytest_paths` attribute are used to populate PYTHONPATH - Paths in `pytest_libpaths` attribute are used to populate the system library path (e.g. 
LD_LIBRARY_PATH) """ nodes = self.to_nodes(self.pytest_source) tsk = self.create_task('utest', nodes) tsk.dep_nodes.extend(self.pytest_dep_nodes) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = lst + tsk.vars if getattr(self, 'ut_cwd', None): if isinstance(self.ut_cwd, str): # we want a Node instance if os.path.isabs(self.ut_cwd): self.ut_cwd = self.bld.root.make_node(self.ut_cwd) else: self.ut_cwd = self.path.make_node(self.ut_cwd) else: if tsk.inputs: self.ut_cwd = tsk.inputs[0].parent else: raise Errors.WafError("no valid input files for pytest task, check pytest_source value") if not self.ut_cwd.exists(): self.ut_cwd.mkdir() if not hasattr(self, 'ut_env'): self.ut_env = dict(os.environ) def add_paths(var, lst): # Add list of paths to a variable, lst can contain strings or nodes lst = [ str(n) for n in lst ] Logs.debug("ut: %s: Adding paths %s=%s", self, var, lst) self.ut_env[var] = os.pathsep.join(lst) + os.pathsep + self.ut_env.get(var, '') # Prepend dependency paths to PYTHONPATH, CLASSPATH and LD_LIBRARY_PATH add_paths('PYTHONPATH', self.pytest_paths) add_paths('CLASSPATH', self.pytest_javapaths) if Utils.is_win32: add_paths('PATH', self.pytest_libpaths) elif Utils.unversioned_sys_platform() == 'darwin': add_paths('DYLD_LIBRARY_PATH', self.pytest_libpaths) add_paths('LD_LIBRARY_PATH', self.pytest_libpaths) else: add_paths('LD_LIBRARY_PATH', self.pytest_libpaths) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/qnxnto.py0000660000000000000000000000356100000000000022621 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Jérôme Carretero 2011 (zougloub) # QNX neutrino compatibility functions import sys, os from waflib import Utils class Popen(object): """ Popen cannot work on QNX from a threaded program: Forking in threads is not implemented in neutrino. Python's os.popen / spawn / fork won't work when running in threads (they will if in the main program thread) In waf, this happens mostly in build. And the use cases can be replaced by os.system() calls. """ __slots__ = ["prog", "kw", "popen", "verbose"] verbose = 0 def __init__(self, prog, **kw): try: self.prog = prog self.kw = kw self.popen = None if Popen.verbose: sys.stdout.write("Popen created: %r, kw=%r..." % (prog, kw)) do_delegate = kw.get('stdout') == -1 and kw.get('stderr') == -1 if do_delegate: if Popen.verbose: print("Delegating to real Popen") self.popen = self.real_Popen(prog, **kw) else: if Popen.verbose: print("Emulating") except Exception as e: if Popen.verbose: print("Exception: %s" % e) raise def __getattr__(self, name): if Popen.verbose: sys.stdout.write("Getattr: %s..." 
% name) if name in Popen.__slots__: return object.__getattribute__(self, name) else: if self.popen is not None: if Popen.verbose: print("from Popen") return getattr(self.popen, name) else: if name == "wait": return self.emu_wait else: raise Exception("subprocess emulation: not implemented: %s" % name) def emu_wait(self): if Popen.verbose: print("emulated wait (%r kw=%r)" % (self.prog, self.kw)) if isinstance(self.prog, str): cmd = self.prog else: cmd = " ".join(self.prog) if 'cwd' in self.kw: cmd = 'cd "%s" && %s' % (self.kw['cwd'], cmd) return os.system(cmd) if sys.platform == "qnx6": Popen.real_Popen = Utils.subprocess.Popen Utils.subprocess.Popen = Popen ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/qt4.py0000660000000000000000000004752200000000000022007 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Tool Description ================ This tool helps with finding Qt4 tools and libraries, and also provides syntactic sugar for using Qt4 tools. The following snippet illustrates the tool usage:: def options(opt): opt.load('compiler_cxx qt4') def configure(conf): conf.load('compiler_cxx qt4') def build(bld): bld( features = 'qt4 cxx cxxprogram', uselib = 'QTCORE QTGUI QTOPENGL QTSVG', source = 'main.cpp textures.qrc aboutDialog.ui', target = 'window', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "qt4" tool. You also need to edit your sources accordingly: - the normal way of doing things is to have your C++ files include the .moc file. This is regarded as the best practice (and provides much faster compilations). It also implies that the include paths have beenset properly. - to have the include paths added automatically, use the following:: from waflib.TaskGen import feature, before_method, after_method @feature('cxx') @after_method('process_source') @before_method('apply_incpaths') def add_includes_paths(self): incs = set(self.to_list(getattr(self, 'includes', ''))) for x in self.compiled_tasks: incs.add(x.inputs[0].parent.path_from(self.path)) self.includes = sorted(incs) Note: another tool provides Qt processing that does not require .moc includes, see 'playground/slow_qt/'. A few options (--qt{dir,bin,...}) and environment variables (QT4_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, tool path selection, etc; please read the source for more info. """ try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os, sys from waflib.Tools import cxx from waflib import Task, Utils, Options, Errors, Context from waflib.TaskGen import feature, after_method, extension from waflib.Configure import conf from waflib import Logs MOC_H = ['.h', '.hpp', '.hxx', '.hh'] """ File extensions associated to the .moc files """ EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C'] """ File extensions of C++ files that may require a .moc processing """ QT4_LIBS = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtXmlPatterns QtWebKit Qt3Support QtHelp QtScript QtDeclarative QtDesigner" class qxx(Task.classes['cxx']): """ Each C++ file can have zero or several .moc files to create. 
They are known only when the files are scanned (preprocessor) To avoid scanning the c++ files each time (parsing C/C++), the results are retrieved from the task cache (bld.node_deps/bld.raw_deps). The moc tasks are also created *dynamically* during the build. """ def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.moc_done = 0 def runnable_status(self): """ Compute the task signature to make sure the scanner was executed. Create the moc tasks by using :py:meth:`waflib.Tools.qt4.qxx.add_moc_tasks` (if necessary), then postpone the task execution (there is no need to recompute the task signature). """ if self.moc_done: return Task.Task.runnable_status(self) else: for t in self.run_after: if not t.hasrun: return Task.ASK_LATER self.add_moc_tasks() return Task.Task.runnable_status(self) def create_moc_task(self, h_node, m_node): """ If several libraries use the same classes, it is possible that moc will run several times (Issue 1318) It is not possible to change the file names, but we can assume that the moc transformation will be identical, and the moc tasks can be shared in a global cache. The defines passed to moc will then depend on task generator order. If this is not acceptable, then use the tool slow_qt4 instead (and enjoy the slow builds... :-( ) """ try: moc_cache = self.generator.bld.moc_cache except AttributeError: moc_cache = self.generator.bld.moc_cache = {} try: return moc_cache[h_node] except KeyError: tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(h_node) tsk.set_outputs(m_node) if self.generator: self.generator.tasks.append(tsk) # direct injection in the build phase (safe because called from the main thread) gen = self.generator.bld.producer gen.outstanding.append(tsk) gen.total += 1 return tsk def moc_h_ext(self): ext = [] try: ext = Options.options.qt_header_ext.split() except AttributeError: pass if not ext: ext = MOC_H return ext def add_moc_tasks(self): """ Create the moc tasks by looking in ``bld.raw_deps[self.uid()]`` """ node = self.inputs[0] bld = self.generator.bld try: # compute the signature once to know if there is a moc file to create self.signature() except KeyError: # the moc file may be referenced somewhere else pass else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') include_nodes = [node.parent] + self.generator.includes_nodes moctasks = [] mocfiles = set() for d in bld.raw_deps.get(self.uid(), []): if not d.endswith('.moc'): continue # process that base.moc only once if d in mocfiles: continue mocfiles.add(d) # find the source associated with the moc file h_node = None base2 = d[:-4] for x in include_nodes: for e in self.moc_h_ext(): h_node = x.find_node(base2 + e) if h_node: break if h_node: m_node = h_node.change_ext('.moc') break else: # foo.cpp -> foo.cpp.moc for k in EXT_QT4: if base2.endswith(k): for x in include_nodes: h_node = x.find_node(base2) if h_node: break if h_node: m_node = h_node.change_ext(k + '.moc') break if not h_node: raise Errors.WafError('No source found for %r which is a moc file' % d) # create the moc task task = self.create_moc_task(h_node, m_node) moctasks.append(task) # simple scheduler dependency: run the moc task before others self.run_after.update(set(moctasks)) self.moc_done = 1 class trans_update(Task.Task): """Update a .ts files from a list of C++ files""" run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' color = 'BLUE' class XMLHandler(ContentHandler): """ Parser for *.qrc* files """ def __init__(self): self.buf 
= [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_rcc_task(self, node): "Create rcc and cxx tasks for *.qrc* files" rcnode = node.change_ext('_rc.cpp') self.create_task('rcc', node, rcnode) cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) try: self.compiled_tasks.append(cpptask) except AttributeError: self.compiled_tasks = [cpptask] return cpptask @extension(*EXT_UI) def create_uic_task(self, node): "hook for uic tasks" uictask = self.create_task('ui4', node) uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])] @extension('.ts') def add_lang(self, node): """add all the .ts file into self.lang""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('qt4') @after_method('apply_link') def apply_qt4(self): """ Add MOC_FLAGS which may be necessary for moc:: def build(bld): bld.program(features='qt4', source='main.cpp', target='app', use='QTCORE') The additional parameters are: :param lang: list of translation files (\\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**) :type update: bool :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm'))) if getattr(self, 'update', None) and Options.options.trans_qt4: cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ a.inputs[0] for a in self.tasks if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')] for x in qmtasks: self.create_task('trans_update', cxxnodes, x.inputs) if getattr(self, 'langname', None): qmnodes = [x.outputs[0] for x in qmtasks] rcnode = self.langname if isinstance(rcnode, str): rcnode = self.path.find_or_declare(rcnode + '.qrc') t = self.create_task('qm2rcc', qmnodes, rcnode) k = create_rcc_task(self, t.outputs[0]) self.link_task.inputs.append(k.outputs[0]) lst = [] for flag in self.to_list(self.env['CXXFLAGS']): if len(flag) < 2: continue f = flag[0:2] if f in ('-D', '-I', '/D', '/I'): if (f[0] == '/'): lst.append('-' + flag[1:]) else: lst.append(flag) self.env.append_value('MOC_FLAGS', lst) @extension(*EXT_QT4) def cxx_hook(self, node): """ Re-map C++ file extensions to the :py:class:`waflib.Tools.qt4.qxx` task. 
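A hedged sketch of the translation parameters documented on ``apply_qt4`` above (the source and translation file names are placeholders and assume matching ``de.ts``/``fr.ts`` files exist next to the wscript)::

    def build(bld):
        bld(
            features = 'qt4 cxx cxxprogram',
            use      = 'QTCORE QTGUI',
            source   = 'main.cpp res.qrc dialog.ui',
            target   = 'app',
            lang     = ['de', 'fr'],      # de.ts / fr.ts are compiled into .qm files
            langname = 'translations',    # the .qm files are bundled into translations.qrc
            update   = True,              # refresh the .ts files when running `waf --translate`
        )
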
""" return self.create_compiled_task('qxx', node) class rcc(Task.Task): """ Process *.qrc* files """ color = 'BLUE' run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' ext_out = ['.h'] def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def scan(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('no xml support was found, the rcc dependencies will be incomplete!') return ([], []) parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) fi = open(self.inputs[0].abspath(), 'r') try: parser.parse(fi) finally: fi.close() nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: names.append(x) return (nodes, names) class moc(Task.Task): """ Create *.moc* files """ color = 'BLUE' run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' def keyword(self): return "Creating" def __str__(self): return self.outputs[0].path_from(self.generator.bld.launch_node()) class ui4(Task.Task): """ Process *.ui* files """ color = 'BLUE' run_str = '${QT_UIC} ${SRC} -o ${TGT}' ext_out = ['.h'] class ts2qm(Task.Task): """ Create *.qm* files from *.ts* files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): """ Transform *.qm* files into *.rc* files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['%s' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '\n\n%s\n\n' % txt self.outputs[0].write(code) def configure(self): """ Besides the configuration options, the environment variable QT4_ROOT may be used to give the location of the qt4 libraries (absolute path). 
The detection will use the program *pkg-config* through :py:func:`waflib.Tools.config_c.check_cfg` """ self.find_qt4_binaries() self.set_qt4_libs_to_check() self.set_qt4_defines() self.find_qt4_libraries() self.add_qt4_rpath() self.simplify_qt4_libs() @conf def find_qt4_binaries(self): env = self.env opt = Options.options qtdir = getattr(opt, 'qtdir', '') qtbin = getattr(opt, 'qtbin', '') paths = [] if qtdir: qtbin = os.path.join(qtdir, 'bin') # the qt directory has been given from QT4_ROOT - deduce the qt binary path if not qtdir: qtdir = os.environ.get('QT4_ROOT', '') qtbin = os.environ.get('QT4_BIN') or os.path.join(qtdir, 'bin') if qtbin: paths = [qtbin] # no qtdir, look in the path and in /usr/local/Trolltech if not qtdir: paths = os.environ.get('PATH', '').split(os.pathsep) paths.append('/usr/share/qt4/bin/') try: lst = Utils.listdir('/usr/local/Trolltech/') except OSError: pass else: if lst: lst.sort() lst.reverse() # keep the highest version qtdir = '/usr/local/Trolltech/%s/' % lst[0] qtbin = os.path.join(qtdir, 'bin') paths.append(qtbin) # at the end, try to find qmake in the paths given # keep the one with the highest version cand = None prev_ver = ['4', '0', '0'] for qmk in ('qmake-qt4', 'qmake4', 'qmake'): try: qmake = self.find_program(qmk, path_list=paths) except self.errors.ConfigurationError: pass else: try: version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip() except self.errors.WafError: pass else: if version: new_ver = version.split('.') if new_ver > prev_ver: cand = qmake prev_ver = new_ver if cand: self.env.QMAKE = cand else: self.fatal('Could not find qmake for qt4') qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_BINS']).strip() + os.sep def find_bin(lst, var): if var in env: return for f in lst: try: ret = self.find_program(f, path_list=paths) except self.errors.ConfigurationError: pass else: env[var]=ret break find_bin(['uic-qt3', 'uic3'], 'QT_UIC3') find_bin(['uic-qt4', 'uic'], 'QT_UIC') if not env.QT_UIC: self.fatal('cannot find the uic compiler for qt4') self.start_msg('Checking for uic version') uicver = self.cmd_and_log(env.QT_UIC + ["-version"], output=Context.BOTH) uicver = ''.join(uicver).strip() uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') self.end_msg(uicver) if uicver.find(' 3.') != -1: self.fatal('this uic compiler is for qt3, add uic for qt4 to your path') find_bin(['moc-qt4', 'moc'], 'QT_MOC') find_bin(['rcc-qt4', 'rcc'], 'QT_RCC') find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE') find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE') env['UIC3_ST']= '%s -o %s' env['UIC_ST'] = '%s -o %s' env['MOC_ST'] = '-o' env['ui_PATTERN'] = 'ui_%s.h' env['QT_LRELEASE_FLAGS'] = ['-silent'] env.MOCCPPPATH_ST = '-I%s' env.MOCDEFINES_ST = '-D%s' @conf def find_qt4_libraries(self): qtlibs = getattr(Options.options, 'qtlibs', None) or os.environ.get("QT4_LIBDIR") if not qtlibs: try: qtlibs = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip() except Errors.WafError: qtdir = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() + os.sep qtlibs = os.path.join(qtdir, 'lib') self.msg('Found the Qt4 libraries in', qtlibs) qtincludes = os.environ.get("QT4_INCLUDES") or self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_HEADERS']).strip() env = self.env if not 'PKG_CONFIG_PATH' in os.environ: os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib' % (qtlibs, qtlibs) 
try: if os.environ.get("QT4_XCOMPILE"): raise self.errors.ConfigurationError() self.check_cfg(atleast_pkgconfig_version='0.1') except self.errors.ConfigurationError: for i in self.qt4_vars: uselib = i.upper() if Utils.unversioned_sys_platform() == "darwin": # Since at least qt 4.7.3 each library locates in separate directory frameworkName = i + ".framework" qtDynamicLib = os.path.join(qtlibs, frameworkName, i) if os.path.exists(qtDynamicLib): env.append_unique('FRAMEWORK_' + uselib, i) self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('INCLUDES_' + uselib, os.path.join(qtlibs, frameworkName, 'Headers')) elif env.DEST_OS != "win32": qtDynamicLib = os.path.join(qtlibs, "lib" + i + ".so") qtStaticLib = os.path.join(qtlibs, "lib" + i + ".a") if os.path.exists(qtDynamicLib): env.append_unique('LIB_' + uselib, i) self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') elif os.path.exists(qtStaticLib): env.append_unique('LIB_' + uselib, i) self.msg('Checking for %s' % i, qtStaticLib, 'GREEN') else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) else: # Release library names are like QtCore4 for k in ("lib%s.a", "lib%s4.a", "%s.lib", "%s4.lib"): lib = os.path.join(qtlibs, k % i) if os.path.exists(lib): env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')]) self.msg('Checking for %s' % i, lib, 'GREEN') break else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) # Debug library names are like QtCore4d uselib = i.upper() + "_debug" for k in ("lib%sd.a", "lib%sd4.a", "%sd.lib", "%sd4.lib"): lib = os.path.join(qtlibs, k % i) if os.path.exists(lib): env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')]) self.msg('Checking for %s' % i, lib, 'GREEN') break else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) else: for i in self.qt4_vars_debug + self.qt4_vars: self.check_cfg(package=i, args='--cflags --libs', mandatory=False) @conf def simplify_qt4_libs(self): # the libpaths make really long command-lines # remove the qtcore ones from qtgui, etc env = self.env def process_lib(vars_, coreval): for d in vars_: var = d.upper() if var == 'QTCORE': continue value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_'+var] = accu process_lib(self.qt4_vars, 'LIBPATH_QTCORE') process_lib(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG') @conf def add_qt4_rpath(self): # rpath if wanted env = self.env if getattr(Options.options, 'want_rpath', False): def process_rpath(vars_, coreval): for d in vars_: var = d.upper() value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if var != 'QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_'+var] = accu process_rpath(self.qt4_vars, 'LIBPATH_QTCORE') process_rpath(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG') @conf def set_qt4_libs_to_check(self): if not hasattr(self, 'qt4_vars'): self.qt4_vars = QT4_LIBS self.qt4_vars = 
Utils.to_list(self.qt4_vars) if not hasattr(self, 'qt4_vars_debug'): self.qt4_vars_debug = [a + '_debug' for a in self.qt4_vars] self.qt4_vars_debug = Utils.to_list(self.qt4_vars_debug) @conf def set_qt4_defines(self): if sys.platform != 'win32': return for x in self.qt4_vars: y = x[2:].upper() self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y) self.env.append_unique('DEFINES_%s_DEBUG' % x.upper(), 'QT_%s_LIB' % y) def options(opt): """ Command-line options """ opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') opt.add_option('--header-ext', type='string', default='', help='header extension for moc files', dest='qt_header_ext') for i in 'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i, type='string', default='', dest=i) opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/relocation.py0000660000000000000000000000433300000000000023427 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Waf 1.6 Try to detect if the project directory was relocated, and if it was, change the node representing the project directory. Just call: waf configure build Note that if the project directory name changes, the signatures for the tasks using files in that directory will change, causing a partial build. """ import os from waflib import Build, ConfigSet, Task, Utils, Errors from waflib.TaskGen import feature, after_method EXTRA_LOCK = '.old_srcdir' old1 = Build.BuildContext.store def store(self): old1(self) db = os.path.join(self.variant_dir, EXTRA_LOCK) env = ConfigSet.ConfigSet() env.SRCDIR = self.srcnode.abspath() env.store(db) Build.BuildContext.store = store old2 = Build.BuildContext.init_dirs def init_dirs(self): if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') srcdir = None db = os.path.join(self.variant_dir, EXTRA_LOCK) env = ConfigSet.ConfigSet() try: env.load(db) srcdir = env.SRCDIR except: pass if srcdir: d = self.root.find_node(srcdir) if d and srcdir != self.top_dir and getattr(d, 'children', ''): srcnode = self.root.make_node(self.top_dir) print("relocating the source directory %r -> %r" % (srcdir, self.top_dir)) srcnode.children = {} for (k, v) in d.children.items(): srcnode.children[k] = v v.parent = srcnode d.children = {} old2(self) Build.BuildContext.init_dirs = init_dirs def uid(self): try: return self.uid_ except AttributeError: # this is not a real hot zone, but we want to avoid surprises here m = Utils.md5() up = m.update up(self.__class__.__name__.encode()) for x in self.inputs + self.outputs: up(x.path_from(x.ctx.srcnode).encode()) self.uid_ = m.digest() return self.uid_ Task.Task.uid = uid @feature('c', 'cxx', 'd', 'go', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES']) self.includes_nodes = lst bld = self.bld self.env['INCPATHS'] = [x.is_child_of(bld.srcnode) and x.path_from(bld.bldnode) or x.abspath() for x in lst] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 
tevent-0.11.0/third_party/waf/waflib/extras/remote.py0000660000000000000000000002307200000000000022564 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Remote Builds tool using rsync+ssh __author__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2013" """ Simple Remote Builds ******************** This tool is an *experimental* tool (meaning, do not even try to pollute the waf bug tracker with bugs in here, contact me directly) providing simple remote builds. It uses rsync and ssh to perform the remote builds. It is intended for performing cross-compilation on platforms where a cross-compiler is either unavailable (eg. MacOS, QNX) a specific product does not exist (eg. Windows builds using Visual Studio) or simply not installed. This tool sends the sources and the waf script to the remote host, and commands the usual waf execution. There are alternatives to using this tool, such as setting up shared folders, logging on to remote machines, and building on the shared folders. Electing one method or another depends on the size of the program. Usage ===== 1. Set your wscript file so it includes a list of variants, e.g.:: from waflib import Utils top = '.' out = 'build' variants = [ 'linux_64_debug', 'linux_64_release', 'linux_32_debug', 'linux_32_release', ] from waflib.extras import remote def options(opt): # normal stuff from here on opt.load('compiler_c') def configure(conf): if not conf.variant: return # normal stuff from here on conf.load('compiler_c') def build(bld): if not bld.variant: return # normal stuff from here on bld(features='c cprogram', target='app', source='main.c') 2. Build the waf file, so it includes this tool, and put it in the current directory .. code:: bash ./waf-light --tools=remote 3. Set the host names to access the hosts: .. code:: bash export REMOTE_QNX=user@kiunix 4. Setup the ssh server and ssh keys The ssh key should not be protected by a password, or it will prompt for it every time. Create the key on the client: .. code:: bash ssh-keygen -t rsa -f foo.rsa Then copy foo.rsa.pub to the remote machine (user@kiunix:/home/user/.ssh/authorized_keys), and make sure the permissions are correct (chmod go-w ~ ~/.ssh ~/.ssh/authorized_keys) A separate key for the build processes can be set in the environment variable WAF_SSH_KEY. The tool will then use 'ssh-keyscan' to avoid prompting for remote hosts, so be warned to use this feature on internal networks only (MITM). .. code:: bash export WAF_SSH_KEY=~/foo.rsa 5. Perform the build: .. 
code:: bash waf configure_all build_all --remote """ import getpass, os, re, sys from collections import OrderedDict from waflib import Context, Options, Utils, ConfigSet from waflib.Build import BuildContext, CleanContext, InstallContext, UninstallContext from waflib.Configure import ConfigurationContext is_remote = False if '--remote' in sys.argv: is_remote = True sys.argv.remove('--remote') class init(Context.Context): """ Generates the *_all commands """ cmd = 'init' fun = 'init' def execute(self): for x in list(Context.g_module.variants): self.make_variant(x) lst = ['remote'] for k in Options.commands: if k.endswith('_all'): name = k.replace('_all', '') for x in Context.g_module.variants: lst.append('%s_%s' % (name, x)) else: lst.append(k) del Options.commands[:] Options.commands += lst def make_variant(self, x): for y in (BuildContext, CleanContext, InstallContext, UninstallContext): name = y.__name__.replace('Context','').lower() class tmp(y): cmd = name + '_' + x fun = 'build' variant = x class tmp(ConfigurationContext): cmd = 'configure_' + x fun = 'configure' variant = x def __init__(self, **kw): ConfigurationContext.__init__(self, **kw) self.setenv(x) class remote(BuildContext): cmd = 'remote' fun = 'build' def get_ssh_hosts(self): lst = [] for v in Context.g_module.variants: self.env.HOST = self.login_to_host(self.variant_to_login(v)) cmd = Utils.subst_vars('${SSH_KEYSCAN} -t rsa,ecdsa ${HOST}', self.env) out, err = self.cmd_and_log(cmd, output=Context.BOTH, quiet=Context.BOTH) lst.append(out.strip()) return lst def setup_private_ssh_key(self): """ When WAF_SSH_KEY points to a private key, a .ssh directory will be created in the build directory Make sure that the ssh key does not prompt for a password """ key = os.environ.get('WAF_SSH_KEY', '') if not key: return if not os.path.isfile(key): self.fatal('Key in WAF_SSH_KEY must point to a valid file') self.ssh_dir = os.path.join(self.path.abspath(), 'build', '.ssh') self.ssh_hosts = os.path.join(self.ssh_dir, 'known_hosts') self.ssh_key = os.path.join(self.ssh_dir, os.path.basename(key)) self.ssh_config = os.path.join(self.ssh_dir, 'config') for x in self.ssh_hosts, self.ssh_key, self.ssh_config: if not os.path.isfile(x): if not os.path.isdir(self.ssh_dir): os.makedirs(self.ssh_dir) Utils.writef(self.ssh_key, Utils.readf(key), 'wb') os.chmod(self.ssh_key, 448) Utils.writef(self.ssh_hosts, '\n'.join(self.get_ssh_hosts())) os.chmod(self.ssh_key, 448) Utils.writef(self.ssh_config, 'UserKnownHostsFile %s' % self.ssh_hosts, 'wb') os.chmod(self.ssh_config, 448) self.env.SSH_OPTS = ['-F', self.ssh_config, '-i', self.ssh_key] self.env.append_value('RSYNC_SEND_OPTS', '--exclude=build/.ssh') def skip_unbuildable_variant(self): # skip variants that cannot be built on this OS for k in Options.commands: a, _, b = k.partition('_') if b in Context.g_module.variants: c, _, _ = b.partition('_') if c != Utils.unversioned_sys_platform(): Options.commands.remove(k) def login_to_host(self, login): return re.sub(r'(\w+@)', '', login) def variant_to_login(self, variant): """linux_32_debug -> search env.LINUX_32 and then env.LINUX""" x = variant[:variant.rfind('_')] ret = os.environ.get('REMOTE_' + x.upper(), '') if not ret: x = x[:x.find('_')] ret = os.environ.get('REMOTE_' + x.upper(), '') if not ret: ret = '%s@localhost' % getpass.getuser() return ret def execute(self): global is_remote if not is_remote: self.skip_unbuildable_variant() else: BuildContext.execute(self) def restore(self): self.top_dir = os.path.abspath(Context.g_module.top) 
self.srcnode = self.root.find_node(self.top_dir) self.path = self.srcnode self.out_dir = os.path.join(self.top_dir, Context.g_module.out) self.bldnode = self.root.make_node(self.out_dir) self.bldnode.mkdir() self.env = ConfigSet.ConfigSet() def extract_groups_of_builds(self): """Return a dict mapping each variants to the commands to build""" self.vgroups = {} for x in reversed(Options.commands): _, _, variant = x.partition('_') if variant in Context.g_module.variants: try: dct = self.vgroups[variant] except KeyError: dct = self.vgroups[variant] = OrderedDict() try: dct[variant].append(x) except KeyError: dct[variant] = [x] Options.commands.remove(x) def custom_options(self, login): try: return Context.g_module.host_options[login] except (AttributeError, KeyError): return {} def recurse(self, *k, **kw): self.env.RSYNC = getattr(Context.g_module, 'rsync', 'rsync -a --chmod=u+rwx') self.env.SSH = getattr(Context.g_module, 'ssh', 'ssh') self.env.SSH_KEYSCAN = getattr(Context.g_module, 'ssh_keyscan', 'ssh-keyscan') try: self.env.WAF = getattr(Context.g_module, 'waf') except AttributeError: try: os.stat('waf') except KeyError: self.fatal('Put a waf file in the directory (./waf-light --tools=remote)') else: self.env.WAF = './waf' self.extract_groups_of_builds() self.setup_private_ssh_key() for k, v in self.vgroups.items(): task = self(rule=rsync_and_ssh, always=True) task.env.login = self.variant_to_login(k) task.env.commands = [] for opt, value in v.items(): task.env.commands += value task.env.variant = task.env.commands[0].partition('_')[2] for opt, value in self.custom_options(k): task.env[opt] = value self.jobs = len(self.vgroups) def make_mkdir_command(self, task): return Utils.subst_vars('${SSH} ${SSH_OPTS} ${login} "rm -fr ${remote_dir} && mkdir -p ${remote_dir}"', task.env) def make_send_command(self, task): return Utils.subst_vars('${RSYNC} ${RSYNC_SEND_OPTS} -e "${SSH} ${SSH_OPTS}" ${local_dir} ${login}:${remote_dir}', task.env) def make_exec_command(self, task): txt = '''${SSH} ${SSH_OPTS} ${login} "cd ${remote_dir} && ${WAF} ${commands}"''' return Utils.subst_vars(txt, task.env) def make_save_command(self, task): return Utils.subst_vars('${RSYNC} ${RSYNC_SAVE_OPTS} -e "${SSH} ${SSH_OPTS}" ${login}:${remote_dir_variant} ${build_dir}', task.env) def rsync_and_ssh(task): # remove a warning task.uid_ = id(task) bld = task.generator.bld task.env.user, _, _ = task.env.login.partition('@') task.env.hdir = Utils.to_hex(Utils.h_list((task.generator.path.abspath(), task.env.variant))) task.env.remote_dir = '~%s/wafremote/%s' % (task.env.user, task.env.hdir) task.env.local_dir = bld.srcnode.abspath() + '/' task.env.remote_dir_variant = '%s/%s/%s' % (task.env.remote_dir, Context.g_module.out, task.env.variant) task.env.build_dir = bld.bldnode.abspath() ret = task.exec_command(bld.make_mkdir_command(task)) if ret: return ret ret = task.exec_command(bld.make_send_command(task)) if ret: return ret ret = task.exec_command(bld.make_exec_command(task)) if ret: return ret ret = task.exec_command(bld.make_save_command(task)) if ret: return ret ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0252094 tevent-0.11.0/third_party/waf/waflib/extras/resx.py0000660000000000000000000000203200000000000022243 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 import os from waflib import Task from waflib.TaskGen import extension def configure(conf): conf.find_program(['resgen'], var='RESGEN') conf.env.RESGENFLAGS = '/useSourcePath' @extension('.resx') def resx_file(self, node): """ Bind the .resx extension to a resgen task """ if not getattr(self, 'cs_task', None): self.bld.fatal('resx_file has no link task for use %r' % self) # Given assembly 'Foo' and file 'Sub/Dir/File.resx', create 'Foo.Sub.Dir.File.resources' assembly = getattr(self, 'namespace', os.path.splitext(self.gen)[0]) res = os.path.splitext(node.path_from(self.path))[0].replace('/', '.').replace('\\', '.') out = self.path.find_or_declare(assembly + '.' + res + '.resources') tsk = self.create_task('resgen', node, out) self.cs_task.dep_nodes.extend(tsk.outputs) # dependency self.env.append_value('RESOURCES', tsk.outputs[0].bldpath()) class resgen(Task.Task): """ Compile C# resource files """ color = 'YELLOW' run_str = '${RESGEN} ${RESGENFLAGS} ${SRC} ${TGT}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/review.py0000660000000000000000000002160200000000000022567 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Laurent Birtz, 2011 # moved the code into a separate tool (ita) """ There are several things here: - a different command-line option management making options persistent - the review command to display the options set Assumptions: - configuration options are not always added to the right group (and do not count on the users to do it...) - the options are persistent between the executions (waf options are NOT persistent by design), even for the configuration - when the options change, the build is invalidated (forcing a reconfiguration) """ import os, textwrap, shutil from waflib import Logs, Context, ConfigSet, Options, Build, Configure class Odict(dict): """Ordered dictionary""" def __init__(self, data=None): self._keys = [] dict.__init__(self) if data: # we were provided a regular dict if isinstance(data, dict): self.append_from_dict(data) # we were provided a tuple list elif type(data) == list: self.append_from_plist(data) # we were provided invalid input else: raise Exception("expected a dict or a tuple list") def append_from_dict(self, dict): map(self.__setitem__, dict.keys(), dict.values()) def append_from_plist(self, plist): for pair in plist: if len(pair) != 2: raise Exception("invalid pairs list") for (k, v) in plist: self.__setitem__(k, v) def __delitem__(self, key): if not key in self._keys: raise KeyError(key) dict.__delitem__(self, key) self._keys.remove(key) def __setitem__(self, key, item): dict.__setitem__(self, key, item) if key not in self._keys: self._keys.append(key) def clear(self): dict.clear(self) self._keys = [] def copy(self): return Odict(self.plist()) def items(self): return zip(self._keys, self.values()) def keys(self): return list(self._keys) # return a copy of the list def values(self): return map(self.get, self._keys) def plist(self): p = [] for k, v in self.items(): p.append( (k, v) ) return p def __str__(self): buf = [] buf.append("{ ") for k, v in self.items(): buf.append('%r : %r, ' % (k, v)) buf.append("}") return ''.join(buf) review_options = Odict() """ Ordered dictionary mapping configuration option names to their optparse option. """ review_defaults = {} """ Dictionary mapping configuration option names to their default value. 
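The ordered dictionary defined above behaves like a regular dict but preserves insertion order, so the review output follows the order in which options were registered; a minimal self-contained sketch (the option names are made up)::

    d = Odict([('with-foo', True), ('prefix', '/usr/local')])
    d['jobs'] = 4
    print(d.keys())    # ['with-foo', 'prefix', 'jobs']
    print(d)           # { 'with-foo' : True, 'prefix' : '/usr/local', 'jobs' : 4, }
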
""" old_review_set = None """ Review set containing the configuration values before parsing the command line. """ new_review_set = None """ Review set containing the configuration values after parsing the command line. """ class OptionsReview(Options.OptionsContext): def __init__(self, **kw): super(self.__class__, self).__init__(**kw) def prepare_config_review(self): """ Find the configuration options that are reviewable, detach their default value from their optparse object and store them into the review dictionaries. """ gr = self.get_option_group('configure options') for opt in gr.option_list: if opt.action != 'store' or opt.dest in ("out", "top"): continue review_options[opt.dest] = opt review_defaults[opt.dest] = opt.default if gr.defaults.has_key(opt.dest): del gr.defaults[opt.dest] opt.default = None def parse_args(self): self.prepare_config_review() self.parser.get_option('--prefix').help = 'installation prefix' super(OptionsReview, self).parse_args() Context.create_context('review').refresh_review_set() class ReviewContext(Context.Context): '''reviews the configuration values''' cmd = 'review' def __init__(self, **kw): super(self.__class__, self).__init__(**kw) out = Options.options.out if not out: out = getattr(Context.g_module, Context.OUT, None) if not out: out = Options.lockfile.replace('.lock-waf', '') self.build_path = (os.path.isabs(out) and self.root or self.path).make_node(out).abspath() """Path to the build directory""" self.cache_path = os.path.join(self.build_path, Build.CACHE_DIR) """Path to the cache directory""" self.review_path = os.path.join(self.cache_path, 'review.cache') """Path to the review cache file""" def execute(self): """ Display and store the review set. Invalidate the cache as required. """ if not self.compare_review_set(old_review_set, new_review_set): self.invalidate_cache() self.store_review_set(new_review_set) print(self.display_review_set(new_review_set)) def invalidate_cache(self): """Invalidate the cache to prevent bad builds.""" try: Logs.warn("Removing the cached configuration since the options have changed") shutil.rmtree(self.cache_path) except: pass def refresh_review_set(self): """ Obtain the old review set and the new review set, and import the new set. """ global old_review_set, new_review_set old_review_set = self.load_review_set() new_review_set = self.update_review_set(old_review_set) self.import_review_set(new_review_set) def load_review_set(self): """ Load and return the review set from the cache if it exists. Otherwise, return an empty set. """ if os.path.isfile(self.review_path): return ConfigSet.ConfigSet(self.review_path) return ConfigSet.ConfigSet() def store_review_set(self, review_set): """ Store the review set specified in the cache. """ if not os.path.isdir(self.cache_path): os.makedirs(self.cache_path) review_set.store(self.review_path) def update_review_set(self, old_set): """ Merge the options passed on the command line with those imported from the previous review set and return the corresponding preview set. """ # Convert value to string. It's important that 'None' maps to # the empty string. 
def val_to_str(val): if val == None or val == '': return '' return str(val) new_set = ConfigSet.ConfigSet() opt_dict = Options.options.__dict__ for name in review_options.keys(): # the option is specified explicitly on the command line if name in opt_dict: # if the option is the default, pretend it was never specified if val_to_str(opt_dict[name]) != val_to_str(review_defaults[name]): new_set[name] = opt_dict[name] # the option was explicitly specified in a previous command elif name in old_set: new_set[name] = old_set[name] return new_set def import_review_set(self, review_set): """ Import the actual value of the reviewable options in the option dictionary, given the current review set. """ for name in review_options.keys(): if name in review_set: value = review_set[name] else: value = review_defaults[name] setattr(Options.options, name, value) def compare_review_set(self, set1, set2): """ Return true if the review sets specified are equal. """ if len(set1.keys()) != len(set2.keys()): return False for key in set1.keys(): if not key in set2 or set1[key] != set2[key]: return False return True def display_review_set(self, review_set): """ Return the string representing the review set specified. """ term_width = Logs.get_term_cols() lines = [] for dest in review_options.keys(): opt = review_options[dest] name = ", ".join(opt._short_opts + opt._long_opts) help = opt.help actual = None if dest in review_set: actual = review_set[dest] default = review_defaults[dest] lines.append(self.format_option(name, help, actual, default, term_width)) return "Configuration:\n\n" + "\n\n".join(lines) + "\n" def format_option(self, name, help, actual, default, term_width): """ Return the string representing the option specified. """ def val_to_str(val): if val == None or val == '': return "(void)" return str(val) max_name_len = 20 sep_len = 2 w = textwrap.TextWrapper() w.width = term_width - 1 if w.width < 60: w.width = 60 out = "" # format the help out += w.fill(help) + "\n" # format the name name_len = len(name) out += Logs.colors.CYAN + name + Logs.colors.NORMAL # set the indentation used when the value wraps to the next line w.subsequent_indent = " ".rjust(max_name_len + sep_len) w.width -= (max_name_len + sep_len) # the name string is too long, switch to the next line if name_len > max_name_len: out += "\n" + w.subsequent_indent # fill the remaining of the line with spaces else: out += " ".rjust(max_name_len + sep_len - name_len) # format the actual value, if there is one if actual != None: out += Logs.colors.BOLD + w.fill(val_to_str(actual)) + Logs.colors.NORMAL + "\n" + w.subsequent_indent # format the default value default_fmt = val_to_str(default) if actual != None: default_fmt = "default: " + default_fmt out += Logs.colors.NORMAL + w.fill(default_fmt) + Logs.colors.NORMAL return out # Monkey-patch ConfigurationContext.execute() to have it store the review set. 
old_configure_execute = Configure.ConfigurationContext.execute def new_configure_execute(self): old_configure_execute(self) Context.create_context('review').store_review_set(new_review_set) Configure.ConfigurationContext.execute = new_configure_execute ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/rst.py0000660000000000000000000001544700000000000022110 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Jérôme Carretero, 2013 (zougloub) """ reStructuredText support (experimental) Example:: def configure(conf): conf.load('rst') if not conf.env.RST2HTML: conf.fatal('The program rst2html is required') def build(bld): bld( features = 'rst', type = 'rst2html', # rst2html, rst2pdf, ... source = 'index.rst', # mandatory, the source deps = 'image.png', # to give additional non-trivial dependencies ) By default the tool looks for a set of programs in PATH. The tools are defined in `rst_progs`. To configure with a special program use:: $ RST2HTML=/path/to/rst2html waf configure This tool is experimental; don't hesitate to contribute to it. """ import re from waflib import Node, Utils, Task, Errors, Logs from waflib.TaskGen import feature, before_method rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split() def parse_rst_node(task, node, nodes, names, seen, dirs=None): # TODO add extensibility, to handle custom rst include tags... if dirs is None: dirs = (node.parent,node.get_bld().parent) if node in seen: return seen.append(node) code = node.read() re_rst = re.compile(r'^\s*.. ((?P\|\S+\|) )?(?Pinclude|image|figure):: (?P.*)$', re.M) for match in re_rst.finditer(code): ipath = match.group('file') itype = match.group('type') Logs.debug('rst: visiting %s: %s', itype, ipath) found = False for d in dirs: Logs.debug('rst: looking for %s in %s', ipath, d.abspath()) found = d.find_node(ipath) if found: Logs.debug('rst: found %s as %s', ipath, found.abspath()) nodes.append((itype, found)) if itype == 'include': parse_rst_node(task, found, nodes, names, seen) break if not found: names.append((itype, ipath)) class docutils(Task.Task): """ Compile a rst file. """ def scan(self): """ A recursive regex-based scanner that finds rst dependencies. 
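A simplified, self-contained sketch of the directive matching performed by this scanner (the substitution-reference part of the real pattern is omitted for brevity, and the file names are placeholders)::

    import re

    re_rst = re.compile(r'^\s*\.\. (?P<type>include|image|figure):: (?P<file>.*)$', re.M)
    sample = ".. image:: images/logo.png\n.. include:: chapters/intro.rst\n"
    for match in re_rst.finditer(sample):
        print('%s -> %s' % (match.group('type'), match.group('file')))
    # image -> images/logo.png
    # include -> chapters/intro.rst
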
""" nodes = [] names = [] seen = [] node = self.inputs[0] if not node: return (nodes, names) parse_rst_node(self, node, nodes, names, seen) Logs.debug('rst: %r: found the following file deps: %r', self, nodes) if names: Logs.warn('rst: %r: could not find the following file deps: %r', self, names) return ([v for (t,v) in nodes], [v for (t,v) in names]) def check_status(self, msg, retcode): """ Check an exit status and raise an error with a particular message :param msg: message to display if the code is non-zero :type msg: string :param retcode: condition :type retcode: boolean """ if retcode != 0: raise Errors.WafError('%r command exit status %r' % (msg, retcode)) def run(self): """ Runs the rst compilation using docutils """ raise NotImplementedError() class rst2html(docutils): color = 'BLUE' def __init__(self, *args, **kw): docutils.__init__(self, *args, **kw) self.command = self.generator.env.RST2HTML self.attributes = ['stylesheet'] def scan(self): nodes, names = docutils.scan(self) for attribute in self.attributes: stylesheet = getattr(self.generator, attribute, None) if stylesheet is not None: ssnode = self.generator.to_nodes(stylesheet)[0] nodes.append(ssnode) Logs.debug('rst: adding dep to %s %s', attribute, stylesheet) return nodes, names def run(self): cwdn = self.outputs[0].parent src = self.inputs[0].path_from(cwdn) dst = self.outputs[0].path_from(cwdn) cmd = self.command + [src, dst] cmd += Utils.to_list(getattr(self.generator, 'options', [])) for attribute in self.attributes: stylesheet = getattr(self.generator, attribute, None) if stylesheet is not None: stylesheet = self.generator.to_nodes(stylesheet)[0] cmd += ['--%s' % attribute, stylesheet.path_from(cwdn)] return self.exec_command(cmd, cwd=cwdn.abspath()) class rst2s5(rst2html): def __init__(self, *args, **kw): rst2html.__init__(self, *args, **kw) self.command = self.generator.env.RST2S5 self.attributes = ['stylesheet'] class rst2latex(rst2html): def __init__(self, *args, **kw): rst2html.__init__(self, *args, **kw) self.command = self.generator.env.RST2LATEX self.attributes = ['stylesheet'] class rst2xetex(rst2html): def __init__(self, *args, **kw): rst2html.__init__(self, *args, **kw) self.command = self.generator.env.RST2XETEX self.attributes = ['stylesheet'] class rst2pdf(docutils): color = 'BLUE' def run(self): cwdn = self.outputs[0].parent src = self.inputs[0].path_from(cwdn) dst = self.outputs[0].path_from(cwdn) cmd = self.generator.env.RST2PDF + [src, '-o', dst] cmd += Utils.to_list(getattr(self.generator, 'options', [])) return self.exec_command(cmd, cwd=cwdn.abspath()) @feature('rst') @before_method('process_source') def apply_rst(self): """ Create :py:class:`rst` or other rst-related task objects """ if self.target: if isinstance(self.target, Node.Node): tgt = self.target elif isinstance(self.target, str): tgt = self.path.get_bld().make_node(self.target) else: self.bld.fatal("rst: Don't know how to build target name %s which is not a string or Node for %s" % (self.target, self)) else: tgt = None tsk_type = getattr(self, 'type', None) src = self.to_nodes(self.source) assert len(src) == 1 src = src[0] if tsk_type is not None and tgt is None: if tsk_type.startswith('rst2'): ext = tsk_type[4:] else: self.bld.fatal("rst: Could not detect the output file extension for %s" % self) tgt = src.change_ext('.%s' % ext) elif tsk_type is None and tgt is not None: out = tgt.name ext = out[out.rfind('.')+1:] self.type = 'rst2' + ext elif tsk_type is not None and tgt is not None: # the user knows what he wants pass else: 
self.bld.fatal("rst: Need to indicate task type or target name for %s" % self) deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for filename in deps: n = self.path.find_resource(filename) if not n: self.bld.fatal('Could not find %r for %r' % (filename, self)) if not n in deps_lst: deps_lst.append(n) try: task = self.create_task(self.type, src, tgt) except KeyError: self.bld.fatal("rst: Task of type %s not implemented (created by %s)" % (self.type, self)) task.env = self.env # add the manual dependencies if deps_lst: try: lst = self.bld.node_deps[task.uid()] for n in deps_lst: if not n in lst: lst.append(n) except KeyError: self.bld.node_deps[task.uid()] = deps_lst inst_to = getattr(self, 'install_path', None) if inst_to: self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:]) self.source = [] def configure(self): """ Try to find the rst programs. Do not raise any error if they are not found. You'll have to use additional code in configure() to die if programs were not found. """ for p in rst_progs: self.find_program(p, mandatory=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/run_do_script.py0000660000000000000000000001157500000000000024150 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a Stata do-script in the directory specified by **ctx.bldnode**. The first and only argument will be the name of the do-script (no extension), which can be accessed inside the do-script by the local macro `1'. Useful for keeping a log file. The tool uses the log file that is automatically kept by Stata only for error-catching purposes, it will be destroyed if the task finished without error. In case of an error in **some_script.do**, you can inspect it as **some_script.log** in the **ctx.bldnode** directory. Note that Stata will not return an error code if it exits abnormally -- catching errors relies on parsing the log file mentioned before. Should the parser behave incorrectly please send an email to hmgaudecker [at] gmail. **WARNING** The tool will not work if multiple do-scripts of the same name---but in different directories---are run at the same time! Avoid this situation. Usage:: ctx(features='run_do_script', source='some_script.do', target=['some_table.tex', 'some_figure.eps'], deps='some_data.csv') """ import os, re, sys from waflib import Task, TaskGen, Logs if sys.platform == 'darwin': STATA_COMMANDS = ['Stata64MP', 'StataMP', 'Stata64SE', 'StataSE', 'Stata64', 'Stata'] STATAFLAGS = '-e -q do' STATAENCODING = 'MacRoman' elif sys.platform.startswith('linux'): STATA_COMMANDS = ['stata-mp', 'stata-se', 'stata'] STATAFLAGS = '-b -q do' # Not sure whether this is correct... STATAENCODING = 'Latin-1' elif sys.platform.lower().startswith('win'): STATA_COMMANDS = ['StataMP-64', 'StataMP-ia', 'StataMP', 'StataSE-64', 'StataSE-ia', 'StataSE', 'Stata-64', 'Stata-ia', 'Stata.e', 'WMPSTATA', 'WSESTATA', 'WSTATA'] STATAFLAGS = '/e do' STATAENCODING = 'Latin-1' else: raise Exception("Unknown sys.platform: %s " % sys.platform) def configure(ctx): ctx.find_program(STATA_COMMANDS, var='STATACMD', errmsg="""\n No Stata executable found!\n\n If Stata is needed:\n 1) Check the settings of your system path. 
2) Note we are looking for Stata executables called: %s If yours has a different name, please report to hmgaudecker [at] gmail\n Else:\n Do not load the 'run_do_script' tool in the main wscript.\n\n""" % STATA_COMMANDS) ctx.env.STATAFLAGS = STATAFLAGS ctx.env.STATAENCODING = STATAENCODING class run_do_script_base(Task.Task): """Run a Stata do-script from the bldnode directory.""" run_str = '"${STATACMD}" ${STATAFLAGS} "${SRC[0].abspath()}" "${DOFILETRUNK}"' shell = True class run_do_script(run_do_script_base): """Use the log file automatically kept by Stata for error-catching. Erase it if the task finished without error. If not, it will show up as do_script.log in the bldnode directory. """ def run(self): run_do_script_base.run(self) ret, log_tail = self.check_erase_log_file() if ret: Logs.error("""Running Stata on %r failed with code %r.\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", self.inputs[0], ret, self.env.LOGFILEPATH, log_tail) return ret def check_erase_log_file(self): """Parse Stata's default log file and erase it if everything okay. Parser is based on Brendan Halpin's shell script found here: http://teaching.sociology.ul.ie/bhalpin/wordpress/?p=122 """ if sys.version_info.major >= 3: kwargs = {'file': self.env.LOGFILEPATH, 'mode': 'r', 'encoding': self.env.STATAENCODING} else: kwargs = {'name': self.env.LOGFILEPATH, 'mode': 'r'} with open(**kwargs) as log: log_tail = log.readlines()[-10:] for line in log_tail: error_found = re.match(r"r\(([0-9]+)\)", line) if error_found: return error_found.group(1), ''.join(log_tail) else: pass # Only end up here if the parser did not identify an error. os.remove(self.env.LOGFILEPATH) return None, None @TaskGen.feature('run_do_script') @TaskGen.before_method('process_source') def apply_run_do_script(tg): """Task generator customising the options etc. to call Stata in batch mode for running a do-script. """ # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] tsk = tg.create_task('run_do_script', src=src_node, tgt=tgt_nodes) tsk.env.DOFILETRUNK = os.path.splitext(src_node.name)[0] tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s.log' % (tsk.env.DOFILETRUNK)) # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/run_m_script.py0000660000000000000000000000577200000000000024004 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a Matlab script. Note that the script is run in the directory where it lives -- Matlab won't allow it any other way. For error-catching purposes, keep an own log-file that is destroyed if the task finished without error. If not, it will show up as mscript_[index].log in the bldnode directory. 
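As with the other run_* tools in this directory, the executable picked up at configure time can be overridden through the variable registered by find_program (a hedged example; the path is made up)::

    $ MATLABCMD=/opt/matlab/bin/matlab waf configure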
Usage:: ctx(features='run_m_script', source='some_script.m', target=['some_table.tex', 'some_figure.eps'], deps='some_data.mat') """ import os, sys from waflib import Task, TaskGen, Logs MATLAB_COMMANDS = ['matlab'] def configure(ctx): ctx.find_program(MATLAB_COMMANDS, var='MATLABCMD', errmsg = """\n No Matlab executable found!\n\n If Matlab is needed:\n 1) Check the settings of your system path. 2) Note we are looking for Matlab executables called: %s If yours has a different name, please report to hmgaudecker [at] gmail\n Else:\n Do not load the 'run_m_script' tool in the main wscript.\n\n""" % MATLAB_COMMANDS) ctx.env.MATLABFLAGS = '-wait -nojvm -nosplash -minimize' class run_m_script_base(Task.Task): """Run a Matlab script.""" run_str = '"${MATLABCMD}" ${MATLABFLAGS} -logfile "${LOGFILEPATH}" -r "try, ${MSCRIPTTRUNK}, exit(0), catch err, disp(err.getReport()), exit(1), end"' shell = True class run_m_script(run_m_script_base): """Erase the Matlab overall log file if everything went okay, else raise an error and print its 10 last lines. """ def run(self): ret = run_m_script_base.run(self) logfile = self.env.LOGFILEPATH if ret: mode = 'r' if sys.version_info.major >= 3: mode = 'rb' with open(logfile, mode=mode) as f: tail = f.readlines()[-10:] Logs.error("""Running Matlab on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", self.inputs[0], ret, logfile, '\n'.join(tail)) else: os.remove(logfile) return ret @TaskGen.feature('run_m_script') @TaskGen.before_method('process_source') def apply_run_m_script(tg): """Task generator customising the options etc. to call Matlab in batch mode for running a m-script. """ # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] tsk = tg.create_task('run_m_script', src=src_node, tgt=tgt_nodes) tsk.cwd = src_node.parent.abspath() tsk.env.MSCRIPTTRUNK = os.path.splitext(src_node.name)[0] tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (tsk.env.MSCRIPTTRUNK, tg.idx)) # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/run_py_script.py0000660000000000000000000000741400000000000024173 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a Python script in the directory specified by **ctx.bldnode**. Select a Python version by specifying the **version** keyword for the task generator instance as integer 2 or 3. Default is 3. If the build environment has an attribute "PROJECT_PATHS" with a key "PROJECT_ROOT", its value will be appended to the PYTHONPATH. Same a string passed to the optional **add_to_pythonpath** keyword (appended after the PROJECT_ROOT). 
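How PROJECT_PATHS is populated is left to the project; a minimal, hypothetical sketch (the directory name 'src' is an assumption) could look like::

    def configure(conf):
        # hypothetical example: store a node under PROJECT_ROOT so that
        # its abspath() can be appended to PYTHONPATH by this tool
        conf.env.PROJECT_PATHS = {'PROJECT_ROOT': conf.path.find_node('src')}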
Usage:: ctx(features='run_py_script', version=3, source='some_script.py', target=['some_table.tex', 'some_figure.eps'], deps='some_data.csv', add_to_pythonpath='src/some/library') """ import os, re from waflib import Task, TaskGen, Logs def configure(conf): """TODO: Might need to be updated for Windows once "PEP 397":http://www.python.org/dev/peps/pep-0397/ is settled. """ conf.find_program('python', var='PY2CMD', mandatory=False) conf.find_program('python3', var='PY3CMD', mandatory=False) if not conf.env.PY2CMD and not conf.env.PY3CMD: conf.fatal("No Python interpreter found!") class run_py_2_script(Task.Task): """Run a Python 2 script.""" run_str = '${PY2CMD} ${SRC[0].abspath()}' shell=True class run_py_3_script(Task.Task): """Run a Python 3 script.""" run_str = '${PY3CMD} ${SRC[0].abspath()}' shell=True @TaskGen.feature('run_py_script') @TaskGen.before_method('process_source') def apply_run_py_script(tg): """Task generator for running either Python 2 or Python 3 on a single script. Attributes: * source -- A **single** source node or string. (required) * target -- A single target or list of targets (nodes or strings) * deps -- A single dependency or list of dependencies (nodes or strings) * add_to_pythonpath -- A string that will be appended to the PYTHONPATH environment variable If the build environment has an attribute "PROJECT_PATHS" with a key "PROJECT_ROOT", its value will be appended to the PYTHONPATH. """ # Set the Python version to use, default to 3. v = getattr(tg, 'version', 3) if v not in (2, 3): raise ValueError("Specify the 'version' attribute for run_py_script task generator as integer 2 or 3.\n Got: %s" %v) # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] # Create the task. 
tsk = tg.create_task('run_py_%d_script' %v, src=src_node, tgt=tgt_nodes) # custom execution environment # TODO use a list and os.sep.join(lst) at the end instead of concatenating strings tsk.env.env = dict(os.environ) tsk.env.env['PYTHONPATH'] = tsk.env.env.get('PYTHONPATH', '') project_paths = getattr(tsk.env, 'PROJECT_PATHS', None) if project_paths and 'PROJECT_ROOT' in project_paths: tsk.env.env['PYTHONPATH'] += os.pathsep + project_paths['PROJECT_ROOT'].abspath() if getattr(tg, 'add_to_pythonpath', None): tsk.env.env['PYTHONPATH'] += os.pathsep + tg.add_to_pythonpath # Clean up the PYTHONPATH -- replace double occurrences of path separator tsk.env.env['PYTHONPATH'] = re.sub(os.pathsep + '+', os.pathsep, tsk.env.env['PYTHONPATH']) # Clean up the PYTHONPATH -- doesn't like starting with path separator if tsk.env.env['PYTHONPATH'].startswith(os.pathsep): tsk.env.env['PYTHONPATH'] = tsk.env.env['PYTHONPATH'][1:] # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/run_r_script.py0000660000000000000000000000531600000000000024003 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a R script in the directory specified by **ctx.bldnode**. For error-catching purposes, keep an own log-file that is destroyed if the task finished without error. If not, it will show up as rscript_[index].log in the bldnode directory. Usage:: ctx(features='run_r_script', source='some_script.r', target=['some_table.tex', 'some_figure.eps'], deps='some_data.csv') """ import os, sys from waflib import Task, TaskGen, Logs R_COMMANDS = ['RTerm', 'R', 'r'] def configure(ctx): ctx.find_program(R_COMMANDS, var='RCMD', errmsg = """\n No R executable found!\n\n If R is needed:\n 1) Check the settings of your system path. 2) Note we are looking for R executables called: %s If yours has a different name, please report to hmgaudecker [at] gmail\n Else:\n Do not load the 'run_r_script' tool in the main wscript.\n\n""" % R_COMMANDS) ctx.env.RFLAGS = 'CMD BATCH --slave' class run_r_script_base(Task.Task): """Run a R script.""" run_str = '"${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" "${LOGFILEPATH}"' shell = True class run_r_script(run_r_script_base): """Erase the R overall log file if everything went okay, else raise an error and print its 10 last lines. """ def run(self): ret = run_r_script_base.run(self) logfile = self.env.LOGFILEPATH if ret: mode = 'r' if sys.version_info.major >= 3: mode = 'rb' with open(logfile, mode=mode) as f: tail = f.readlines()[-10:] Logs.error("""Running R on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", self.inputs[0], ret, logfile, '\n'.join(tail)) else: os.remove(logfile) return ret @TaskGen.feature('run_r_script') @TaskGen.before_method('process_source') def apply_run_r_script(tg): """Task generator customising the options etc. to call R in batch mode for running a R script. 
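With the flags set in configure() above, the command that is finally executed is roughly the following (program path, script name and task index are illustrative)::

    "R" CMD BATCH --slave "/path/to/some_script.r" "/path/to/build/some_script_0.log"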
""" # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes) tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (os.path.splitext(src_node.name)[0], tg.idx)) # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/sas.py0000660000000000000000000000363200000000000022057 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Mark Coggeshall, 2010 "SAS support" import os from waflib import Task, Errors, Logs from waflib.TaskGen import feature, before_method sas_fun, _ = Task.compile_fun('sas -sysin ${SRCFILE} -log ${LOGFILE} -print ${LSTFILE}', shell=False) class sas(Task.Task): vars = ['SAS', 'SASFLAGS'] def run(task): command = 'SAS' fun = sas_fun node = task.inputs[0] logfilenode = node.change_ext('.log') lstfilenode = node.change_ext('.lst') # set the cwd task.cwd = task.inputs[0].parent.get_src().abspath() Logs.debug('runner: %r on %r', command, node) SASINPUTS = node.parent.get_bld().abspath() + os.pathsep + node.parent.get_src().abspath() + os.pathsep task.env.env = {'SASINPUTS': SASINPUTS} task.env.SRCFILE = node.abspath() task.env.LOGFILE = logfilenode.abspath() task.env.LSTFILE = lstfilenode.abspath() ret = fun(task) if ret: Logs.error('Running %s on %r returned a non-zero exit', command, node) Logs.error('SRCFILE = %r', node) Logs.error('LOGFILE = %r', logfilenode) Logs.error('LSTFILE = %r', lstfilenode) return ret @feature('sas') @before_method('process_source') def apply_sas(self): if not getattr(self, 'type', None) in ('sas',): self.type = 'sas' self.env['logdir'] = getattr(self, 'logdir', 'log') self.env['lstdir'] = getattr(self, 'lstdir', 'lst') deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for filename in deps: n = self.path.find_resource(filename) if not n: n = self.bld.root.find_resource(filename) if not n: raise Errors.WafError('cannot find input file %s for processing' % filename) if not n in deps_lst: deps_lst.append(n) for node in self.to_nodes(self.source): if self.type == 'sas': task = self.create_task('sas', src=node) task.dep_nodes = deps_lst self.source = [] def configure(self): self.find_program('sas', var='SAS', mandatory=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/satellite_assembly.py0000660000000000000000000000416100000000000025154 0ustar00rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # vim: tabstop=4 noexpandtab """ Create a satellite assembly from "*.??.txt" files. ?? stands for a language code. The projects Resources subfolder contains resources.??.txt string files for several languages. The build folder will hold the satellite assemblies as ./??/ExeName.resources.dll #gen becomes template (It is called gen because it also uses resx.py). 
bld(source='Resources/resources.de.txt',gen=ExeName) """ import os, re from waflib import Task from waflib.TaskGen import feature,before_method class al(Task.Task): run_str = '${AL} ${ALFLAGS}' @feature('satellite_assembly') @before_method('process_source') def satellite_assembly(self): if not getattr(self, 'gen', None): self.bld.fatal('satellite_assembly needs a template assembly provided with the "gen" parameter') res_lang = re.compile(r'(.*)\.(\w\w)\.(?:resx|txt)',flags=re.I) # self.source can contain node objects, so this will break in one way or another self.source = self.to_list(self.source) for i, x in enumerate(self.source): #x = 'resources/resources.de.resx' #x = 'resources/resources.de.txt' mo = res_lang.match(x) if mo: template = os.path.splitext(self.gen)[0] templatedir, templatename = os.path.split(template) res = mo.group(1) lang = mo.group(2) #./Resources/resources.de.resources resources = self.path.find_or_declare(res+ '.' + lang + '.resources') self.create_task('resgen', self.to_nodes(x), [resources]) #./de/Exename.resources.dll satellite = self.path.find_or_declare(os.path.join(templatedir,lang,templatename) + '.resources.dll') tsk = self.create_task('al',[resources],[satellite]) tsk.env.append_value('ALFLAGS','/template:'+os.path.join(self.path.relpath(),self.gen)) tsk.env.append_value('ALFLAGS','/embed:'+resources.relpath()) tsk.env.append_value('ALFLAGS','/culture:'+lang) tsk.env.append_value('ALFLAGS','/out:'+satellite.relpath()) self.source[i] = None # remove the None elements that we just substituted self.source = list(filter(lambda x:x, self.source)) def configure(ctx): ctx.find_program('al', var='AL', mandatory=True) ctx.load('resx') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/scala.py0000660000000000000000000000637700000000000022365 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) """ Scala support scalac outputs files a bit where it wants to """ import os from waflib import Task, Utils, Node from waflib.TaskGen import feature, before_method, after_method from waflib.Tools import ccroot ccroot.USELIB_VARS['scalac'] = set(['CLASSPATH', 'SCALACFLAGS']) from waflib.Tools import javaw @feature('scalac') @before_method('process_source') def apply_scalac(self): Utils.def_attrs(self, jarname='', classpath='', sourcepath='.', srcdir='.', jar_mf_attributes={}, jar_mf_classpath=[]) outdir = getattr(self, 'outdir', None) if outdir: if not isinstance(outdir, Node.Node): outdir = self.path.get_bld().make_node(self.outdir) else: outdir = self.path.get_bld() outdir.mkdir() self.env['OUTDIR'] = outdir.abspath() self.scalac_task = tsk = self.create_task('scalac') tmp = [] srcdir = getattr(self, 'srcdir', '') if isinstance(srcdir, Node.Node): srcdir = [srcdir] for x in Utils.to_list(srcdir): if isinstance(x, Node.Node): y = x else: y = self.path.find_dir(x) if not y: self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) tmp.append(y) tsk.srcdir = tmp # reuse some code feature('scalac')(javaw.use_javac_files) after_method('apply_scalac')(javaw.use_javac_files) feature('scalac')(javaw.set_classpath) after_method('apply_scalac', 'use_scalac_files')(javaw.set_classpath) SOURCE_RE = '**/*.scala' class scalac(javaw.javac): color = 'GREEN' vars = ['CLASSPATH', 'SCALACFLAGS', 'SCALAC', 'OUTDIR'] def runnable_status(self): """ Wait for dependent tasks to be complete, then read the file system to find the input 
nodes. """ for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: global SOURCE_RE self.inputs = [] for x in self.srcdir: self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False)) return super(javaw.javac, self).runnable_status() def run(self): """ Execute the scalac compiler """ env = self.env gen = self.generator bld = gen.bld wd = bld.bldnode.abspath() def to_list(xx): if isinstance(xx, str): return [xx] return xx self.last_cmd = lst = [] lst.extend(to_list(env['SCALAC'])) lst.extend(['-classpath']) lst.extend(to_list(env['CLASSPATH'])) lst.extend(['-d']) lst.extend(to_list(env['OUTDIR'])) lst.extend(to_list(env['SCALACFLAGS'])) lst.extend([a.abspath() for a in self.inputs]) lst = [x for x in lst if x] try: self.out = self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, output=0, quiet=0)[1] except: self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None) def configure(self): """ Detect the scalac program """ # If SCALA_HOME is set, we prepend it to the path list java_path = self.environ['PATH'].split(os.pathsep) v = self.env if 'SCALA_HOME' in self.environ: java_path = [os.path.join(self.environ['SCALA_HOME'], 'bin')] + java_path self.env['SCALA_HOME'] = [self.environ['SCALA_HOME']] for x in 'scalac scala'.split(): self.find_program(x, var=x.upper(), path_list=java_path) if 'CLASSPATH' in self.environ: v['CLASSPATH'] = self.environ['CLASSPATH'] v.SCALACFLAGS = ['-verbose'] if not v['SCALAC']: self.fatal('scalac is required for compiling scala classes') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/slow_qt4.py0000660000000000000000000000537600000000000023054 0ustar00rootroot00000000000000#! /usr/bin/env python # Thomas Nagy, 2011 (ita) """ Create _moc.cpp files The builds are 30-40% faster when .moc files are included, you should NOT use this tool. If you really really want it: def configure(conf): conf.load('compiler_cxx qt4') conf.load('slow_qt4') See playground/slow_qt/wscript for a complete example. """ from waflib.TaskGen import extension from waflib import Task import waflib.Tools.qt4 import waflib.Tools.cxx @extension(*waflib.Tools.qt4.EXT_QT4) def cxx_hook(self, node): return self.create_compiled_task('cxx_qt', node) class cxx_qt(Task.classes['cxx']): def runnable_status(self): ret = Task.classes['cxx'].runnable_status(self) if ret != Task.ASK_LATER and not getattr(self, 'moc_done', None): try: cache = self.generator.moc_cache except AttributeError: cache = self.generator.moc_cache = {} deps = self.generator.bld.node_deps[self.uid()] for x in [self.inputs[0]] + deps: if x.read().find('Q_OBJECT') > 0: # process "foo.h -> foo.moc" only if "foo.cpp" is in the sources for the current task generator # this code will work because it is in the main thread (runnable_status) if x.name.rfind('.') > -1: # a .h file... 
name = x.name[:x.name.rfind('.')] for tsk in self.generator.compiled_tasks: if tsk.inputs and tsk.inputs[0].name.startswith(name): break else: # no corresponding file, continue continue # the file foo.cpp could be compiled for a static and a shared library - hence the %number in the name cxx_node = x.parent.get_bld().make_node(x.name.replace('.', '_') + '_%d_moc.cpp' % self.generator.idx) if cxx_node in cache: continue cache[cxx_node] = self tsk = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(x) tsk.set_outputs(cxx_node) if x.name.endswith('.cpp'): # moc is trying to be too smart but it is too dumb: # why forcing the #include when Q_OBJECT is in the cpp file? gen = self.generator.bld.producer gen.outstanding.append(tsk) gen.total += 1 self.set_run_after(tsk) else: cxxtsk = Task.classes['cxx'](env=self.env, generator=self.generator) cxxtsk.set_inputs(tsk.outputs) cxxtsk.set_outputs(cxx_node.change_ext('.o')) cxxtsk.set_run_after(tsk) try: self.more_tasks.extend([tsk, cxxtsk]) except AttributeError: self.more_tasks = [tsk, cxxtsk] try: link = self.generator.link_task except AttributeError: pass else: link.set_run_after(cxxtsk) link.inputs.extend(cxxtsk.outputs) link.inputs.sort(key=lambda x: x.abspath()) self.moc_done = True for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return ret ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/softlink_libs.py0000660000000000000000000000452500000000000024135 0ustar00rootroot00000000000000#! /usr/bin/env python # per rosengren 2011 from waflib.TaskGen import feature, after_method from waflib.Task import Task, always_run from os.path import basename, isabs from os import tmpfile, linesep def options(opt): grp = opt.add_option_group('Softlink Libraries Options') grp.add_option('--exclude', default='/usr/lib,/lib', help='No symbolic links are created for libs within [%default]') def configure(cnf): cnf.find_program('ldd') if not cnf.env.SOFTLINK_EXCLUDE: cnf.env.SOFTLINK_EXCLUDE = cnf.options.exclude.split(',') @feature('softlink_libs') @after_method('process_rule') def add_finder(self): tgt = self.path.find_or_declare(self.target) self.create_task('sll_finder', tgt=tgt) self.create_task('sll_installer', tgt=tgt) always_run(sll_installer) class sll_finder(Task): ext_out = 'softlink_libs' def run(self): bld = self.generator.bld linked=[] target_paths = [] for g in bld.groups: for tgen in g: # FIXME it might be better to check if there is a link_task (getattr?) 
target_paths += [tgen.path.get_bld().bldpath()] linked += [t.outputs[0].bldpath() for t in getattr(tgen, 'tasks', []) if t.__class__.__name__ in ['cprogram', 'cshlib', 'cxxprogram', 'cxxshlib']] lib_list = [] if len(linked): cmd = [self.env.LDD] + linked # FIXME add DYLD_LIBRARY_PATH+PATH for osx+win32 ldd_env = {'LD_LIBRARY_PATH': ':'.join(target_paths + self.env.LIBPATH)} # FIXME the with syntax will not work in python 2 with tmpfile() as result: self.exec_command(cmd, env=ldd_env, stdout=result) result.seek(0) for line in result.readlines(): words = line.split() if len(words) < 3 or words[1] != '=>': continue lib = words[2] if lib == 'not': continue if any([lib.startswith(p) for p in [bld.bldnode.abspath(), '('] + self.env.SOFTLINK_EXCLUDE]): continue if not isabs(lib): continue lib_list.append(lib) lib_list = sorted(set(lib_list)) self.outputs[0].write(linesep.join(lib_list + self.env.DYNAMIC_LIBS)) return 0 class sll_installer(Task): ext_in = 'softlink_libs' def run(self): tgt = self.outputs[0] self.generator.bld.install_files('${LIBDIR}', tgt, postpone=False) lib_list=tgt.read().split() for lib in lib_list: self.generator.bld.symlink_as('${LIBDIR}/'+basename(lib), lib, postpone=False) return 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/sphinx.py0000660000000000000000000000674500000000000022612 0ustar00rootroot00000000000000"""Support for Sphinx documentation This is a wrapper for sphinx-build program. Please note that sphinx-build supports only one output format which can passed to build via sphinx_output_format attribute. The default output format is html. Example wscript: def configure(cnf): conf.load('sphinx') def build(bld): bld( features='sphinx', sphinx_source='sources', # path to source directory sphinx_options='-a -v', # sphinx-build program additional options sphinx_output_format='man' # output format of sphinx documentation ) """ from waflib.Node import Node from waflib import Utils from waflib import Task from waflib.TaskGen import feature, after_method def configure(cnf): """Check if sphinx-build program is available and loads gnu_dirs tool.""" cnf.find_program('sphinx-build', var='SPHINX_BUILD', mandatory=False) cnf.load('gnu_dirs') @feature('sphinx') def build_sphinx(self): """Builds sphinx sources. 
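The task created below runs sphinx-build in make-mode; with the defaults this amounts to roughly (directories and options are illustrative)::

    sphinx-build -M html /path/to/sources /path/to/build -a -v

The install destination is chosen from the output format in get_install_path(): 'man' goes to MANDIR, 'info' to INFODIR and everything else to DOCDIR.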
""" if not self.env.SPHINX_BUILD: self.bld.fatal('Program SPHINX_BUILD not defined.') if not getattr(self, 'sphinx_source', None): self.bld.fatal('Attribute sphinx_source not defined.') if not isinstance(self.sphinx_source, Node): self.sphinx_source = self.path.find_node(self.sphinx_source) if not self.sphinx_source: self.bld.fatal('Can\'t find sphinx_source: %r' % self.sphinx_source) Utils.def_attrs(self, sphinx_output_format='html') self.env.SPHINX_OUTPUT_FORMAT = self.sphinx_output_format self.env.SPHINX_OPTIONS = getattr(self, 'sphinx_options', []) for source_file in self.sphinx_source.ant_glob('**/*'): self.bld.add_manual_dependency(self.sphinx_source, source_file) sphinx_build_task = self.create_task('SphinxBuildingTask') sphinx_build_task.set_inputs(self.sphinx_source) sphinx_build_task.set_outputs(self.path.get_bld()) # the sphinx-build results are in directory self.sphinx_output_directory = self.path.get_bld().make_node(self.env.SPHINX_OUTPUT_FORMAT) self.sphinx_output_directory.mkdir() Utils.def_attrs(self, install_path=get_install_path(self)) def get_install_path(tg): if tg.env.SPHINX_OUTPUT_FORMAT == 'man': return tg.env.MANDIR elif tg.env.SPHINX_OUTPUT_FORMAT == 'info': return tg.env.INFODIR else: return tg.env.DOCDIR class SphinxBuildingTask(Task.Task): color = 'BOLD' run_str = '${SPHINX_BUILD} -M ${SPHINX_OUTPUT_FORMAT} ${SRC} ${TGT} ${SPHINX_OPTIONS}' def keyword(self): return 'Compiling (%s)' % self.env.SPHINX_OUTPUT_FORMAT def runnable_status(self): for x in self.run_after: if not x.hasrun: return Task.ASK_LATER self.signature() ret = Task.Task.runnable_status(self) if ret == Task.SKIP_ME: # in case the files were removed self.add_install() return ret def post_run(self): self.add_install() return Task.Task.post_run(self) def add_install(self): nodes = self.generator.sphinx_output_directory.ant_glob('**/*', quiet=True) self.outputs += nodes self.generator.add_install_files(install_to=self.generator.install_path, install_from=nodes, postpone=False, cwd=self.generator.sphinx_output_directory, relative_trick=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/stale.py0000660000000000000000000000437100000000000022402 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Thomas Nagy, 2006-2015 (ita) """ Add a pre-build hook to remove build files (declared in the system) that do not have a corresponding target This can be used for example to remove the targets that have changed name without performing a full 'waf clean' Of course, it will only work if there are no dynamically generated nodes/tasks, in which case the method will have to be modified to exclude some folders for example. Make sure to set bld.post_mode = waflib.Build.POST_AT_ONCE """ from waflib import Logs, Build from waflib.Runner import Parallel DYNAMIC_EXT = [] # add your non-cleanable files/extensions here MOC_H_EXTS = '.cpp .cxx .hpp .hxx .h'.split() def can_delete(node): """Imperfect moc cleanup which does not look for a Q_OBJECT macro in the files""" if not node.name.endswith('.moc'): return True base = node.name[:-4] p1 = node.parent.get_src() p2 = node.parent.get_bld() for k in MOC_H_EXTS: h_name = base + k n = p1.search_node(h_name) if n: return False n = p2.search_node(h_name) if n: return False # foo.cpp.moc, foo.h.moc, etc. 
if base.endswith(k): return False return True # recursion over the nodes to find the stale files def stale_rec(node, nodes): if node.abspath() in node.ctx.env[Build.CFG_FILES]: return if getattr(node, 'children', []): for x in node.children.values(): if x.name != "c4che": stale_rec(x, nodes) else: for ext in DYNAMIC_EXT: if node.name.endswith(ext): break else: if not node in nodes: if can_delete(node): Logs.warn('Removing stale file -> %r', node) node.delete() old = Parallel.refill_task_list def refill_task_list(self): iit = old(self) bld = self.bld # execute this operation only once if getattr(self, 'stale_done', False): return iit self.stale_done = True # this does not work in partial builds if bld.targets != '*': return iit # this does not work in dynamic builds if getattr(bld, 'post_mode') == Build.POST_AT_ONCE: return iit # obtain the nodes to use during the build nodes = [] for tasks in bld.groups: for x in tasks: try: nodes.extend(x.outputs) except AttributeError: pass stale_rec(bld.bldnode, nodes) return iit Parallel.refill_task_list = refill_task_list ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/stracedeps.py0000660000000000000000000001000600000000000023417 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 (ita) """ Execute tasks through strace to obtain dependencies after the process is run. This scheme is similar to that of the Fabricate script. To use:: def configure(conf): conf.load('strace') WARNING: * This will not work when advanced scanners are needed (qt4/qt5) * The overhead of running 'strace' is significant (56s -> 1m29s) * It will not work on Windows :-) """ import os, re, threading from waflib import Task, Logs, Utils #TRACECALLS = 'trace=access,chdir,clone,creat,execve,exit_group,fork,lstat,lstat64,mkdir,open,rename,stat,stat64,symlink,vfork' TRACECALLS = 'trace=process,file' BANNED = ('/tmp', '/proc', '/sys', '/dev') s_process = r'(?:clone|fork|vfork)\(.*?(?P\d+)' s_file = r'(?P\w+)\("(?P([^"\\]|\\.)*)"(.*)' re_lines = re.compile(r'^(?P\d+)\s+(?:(?:%s)|(?:%s))\r*$' % (s_file, s_process), re.IGNORECASE | re.MULTILINE) strace_lock = threading.Lock() def configure(conf): conf.find_program('strace') def task_method(func): # Decorator function to bind/replace methods on the base Task class # # The methods Task.exec_command and Task.sig_implicit_deps already exists and are rarely overridden # we thus expect that we are the only ones doing this try: setattr(Task.Task, 'nostrace_%s' % func.__name__, getattr(Task.Task, func.__name__)) except AttributeError: pass setattr(Task.Task, func.__name__, func) return func @task_method def get_strace_file(self): try: return self.strace_file except AttributeError: pass if self.outputs: ret = self.outputs[0].abspath() + '.strace' else: ret = '%s%s%d%s' % (self.generator.bld.bldnode.abspath(), os.sep, id(self), '.strace') self.strace_file = ret return ret @task_method def get_strace_args(self): return (self.env.STRACE or ['strace']) + ['-e', TRACECALLS, '-f', '-o', self.get_strace_file()] @task_method def exec_command(self, cmd, **kw): bld = self.generator.bld if not 'cwd' in kw: kw['cwd'] = self.get_cwd() args = self.get_strace_args() fname = self.get_strace_file() if isinstance(cmd, list): cmd = args + cmd else: cmd = '%s %s' % (' '.join(args), cmd) try: ret = bld.exec_command(cmd, **kw) finally: if not ret: self.parse_strace_deps(fname, kw['cwd']) return ret @task_method def 
sig_implicit_deps(self): # bypass the scanner functions return @task_method def parse_strace_deps(self, path, cwd): # uncomment the following line to disable the dependencies and force a file scan # return try: cnt = Utils.readf(path) finally: try: os.remove(path) except OSError: pass if not isinstance(cwd, str): cwd = cwd.abspath() nodes = [] bld = self.generator.bld try: cache = bld.strace_cache except AttributeError: cache = bld.strace_cache = {} # chdir and relative paths pid_to_cwd = {} global BANNED done = set() for m in re.finditer(re_lines, cnt): # scraping the output of strace pid = m.group('pid') if m.group('npid'): npid = m.group('npid') pid_to_cwd[npid] = pid_to_cwd.get(pid, cwd) continue p = m.group('path').replace('\\"', '"') if p == '.' or m.group().find('= -1 ENOENT') > -1: # just to speed it up a bit continue if not os.path.isabs(p): p = os.path.join(pid_to_cwd.get(pid, cwd), p) call = m.group('call') if call == 'chdir': pid_to_cwd[pid] = p continue if p in done: continue done.add(p) for x in BANNED: if p.startswith(x): break else: if p.endswith('/') or os.path.isdir(p): continue try: node = cache[p] except KeyError: strace_lock.acquire() try: cache[p] = node = bld.root.find_node(p) if not node: continue finally: strace_lock.release() nodes.append(node) # record the dependencies then force the task signature recalculation for next time if Logs.verbose: Logs.debug('deps: real scanner for %r returned %r', self, nodes) bld = self.generator.bld bld.node_deps[self.uid()] = nodes bld.raw_deps[self.uid()] = [] try: del self.cache_sig except AttributeError: pass self.signature() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/swig.py0000660000000000000000000001415400000000000022243 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: UTF-8 # Petar Forai # Thomas Nagy 2008-2010 (ita) import re from waflib import Task, Logs from waflib.TaskGen import extension, feature, after_method from waflib.Configure import conf from waflib.Tools import c_preproc """ tasks have to be added dynamically: - swig interface files may be created at runtime - the module name may be unknown in advance """ SWIG_EXTS = ['.swig', '.i'] re_module = re.compile(r'%module(?:\s*\(.*\))?\s+(.+)', re.M) re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M) re_2 = re.compile(r'[#%](?:include|import(?:\(module=".*"\))+|python(?:begin|code)) [<"](.*)[">]', re.M) class swig(Task.Task): color = 'BLUE' run_str = '${SWIG} ${SWIGFLAGS} ${SWIGPATH_ST:INCPATHS} ${SWIGDEF_ST:DEFINES} ${SRC}' ext_out = ['.h'] # might produce .h files although it is not mandatory vars = ['SWIG_VERSION', 'SWIGDEPS'] def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not getattr(self, 'init_outputs', None): self.init_outputs = True if not getattr(self, 'module', None): # search the module name txt = self.inputs[0].read() m = re_module.search(txt) if not m: raise ValueError("could not find the swig module name") self.module = m.group(1) swig_c(self) # add the language-specific output files as nodes # call funs in the dict swig_langs for x in self.env['SWIGFLAGS']: # obtain the language x = x[1:] try: fun = swig_langs[x] except KeyError: pass else: fun(self) return super(swig, self).runnable_status() def scan(self): "scan for swig dependencies, climb the .i files" lst_src = [] seen = [] missing = [] to_see = [self.inputs[0]] while to_see: node = to_see.pop(0) if node in seen: continue seen.append(node) lst_src.append(node) # read the file code = node.read() code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) # find .i files and project headers names = re_2.findall(code) for n in names: for d in self.generator.includes_nodes + [node.parent]: u = d.find_resource(n) if u: to_see.append(u) break else: missing.append(n) return (lst_src, missing) # provide additional language processing swig_langs = {} def swigf(fun): swig_langs[fun.__name__.replace('swig_', '')] = fun return fun swig.swigf = swigf def swig_c(self): ext = '.swigwrap_%d.c' % self.generator.idx flags = self.env['SWIGFLAGS'] if '-c++' in flags: ext += 'xx' out_node = self.inputs[0].parent.find_or_declare(self.module + ext) if '-c++' in flags: c_tsk = self.generator.cxx_hook(out_node) else: c_tsk = self.generator.c_hook(out_node) c_tsk.set_run_after(self) # transfer weights from swig task to c task if getattr(self, 'weight', None): c_tsk.weight = self.weight if getattr(self, 'tree_weight', None): c_tsk.tree_weight = self.tree_weight try: self.more_tasks.append(c_tsk) except AttributeError: self.more_tasks = [c_tsk] try: ltask = self.generator.link_task except AttributeError: pass else: ltask.set_run_after(c_tsk) # setting input nodes does not declare the build order # because the build already started, but it sets # the dependency to enable rebuilds ltask.inputs.append(c_tsk.outputs[0]) self.outputs.append(out_node) if not '-o' in self.env['SWIGFLAGS']: self.env.append_value('SWIGFLAGS', ['-o', self.outputs[0].abspath()]) @swigf def swig_python(tsk): node = tsk.inputs[0].parent if tsk.outdir: node = tsk.outdir tsk.set_outputs(node.find_or_declare(tsk.module+'.py')) @swigf def swig_ocaml(tsk): node = tsk.inputs[0].parent if tsk.outdir: node = tsk.outdir tsk.set_outputs(node.find_or_declare(tsk.module+'.ml')) 
tsk.set_outputs(node.find_or_declare(tsk.module+'.mli')) @extension(*SWIG_EXTS) def i_file(self, node): # the task instance tsk = self.create_task('swig') tsk.set_inputs(node) tsk.module = getattr(self, 'swig_module', None) flags = self.to_list(getattr(self, 'swig_flags', [])) tsk.env.append_value('SWIGFLAGS', flags) tsk.outdir = None if '-outdir' in flags: outdir = flags[flags.index('-outdir')+1] outdir = tsk.generator.bld.bldnode.make_node(outdir) outdir.mkdir() tsk.outdir = outdir @feature('c', 'cxx', 'd', 'fc', 'asm') @after_method('apply_link', 'process_source') def enforce_swig_before_link(self): try: link_task = self.link_task except AttributeError: pass else: for x in self.tasks: if x.__class__.__name__ == 'swig': link_task.run_after.add(x) @conf def check_swig_version(conf, minver=None): """ Check if the swig tool is found matching a given minimum version. minver should be a tuple, eg. to check for swig >= 1.3.28 pass (1,3,28) as minver. If successful, SWIG_VERSION is defined as 'MAJOR.MINOR' (eg. '1.3') of the actual swig version found. :param minver: minimum version :type minver: tuple of int :return: swig version :rtype: tuple of int """ assert minver is None or isinstance(minver, tuple) swigbin = conf.env['SWIG'] if not swigbin: conf.fatal('could not find the swig executable') # Get swig version string cmd = swigbin + ['-version'] Logs.debug('swig: Running swig command %r', cmd) reg_swig = re.compile(r'SWIG Version\s(.*)', re.M) swig_out = conf.cmd_and_log(cmd) swigver_tuple = tuple([int(s) for s in reg_swig.findall(swig_out)[0].split('.')]) # Compare swig version with the minimum required result = (minver is None) or (swigver_tuple >= minver) if result: # Define useful environment variables swigver = '.'.join([str(x) for x in swigver_tuple[:2]]) conf.env['SWIG_VERSION'] = swigver # Feedback swigver_full = '.'.join(map(str, swigver_tuple[:3])) if minver is None: conf.msg('Checking for swig version', swigver_full) else: minver_str = '.'.join(map(str, minver)) conf.msg('Checking for swig version >= %s' % (minver_str,), swigver_full, color=result and 'GREEN' or 'YELLOW') if not result: conf.fatal('The swig version is too old, expecting %r' % (minver,)) return swigver_tuple def configure(conf): conf.find_program('swig', var='SWIG') conf.env.SWIGPATH_ST = '-I%s' conf.env.SWIGDEF_ST = '-D%s' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/syms.py0000660000000000000000000000621000000000000022257 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ This tool supports the export_symbols_regex to export the symbols in a shared library. by default, all symbols are exported by gcc, and nothing by msvc. to use the tool, do something like: def build(ctx): ctx(features='c cshlib syms', source='a.c b.c', export_symbols_regex='mylib_.*', target='testlib') only the symbols starting with 'mylib_' will be exported. 
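Depending on DEST_BINFMT, the generated .def file is an MSVC-style EXPORTS file (pe), a GNU ld version script (elf) or a plain symbol list for -exported_symbols_list (mac-o). For an ELF target the file written by compile_sym looks roughly like this (symbol names are illustrative)::

    { global:
    mylib_bar;
    mylib_foo;
    local: *; };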
""" import re from waflib.Context import STDOUT from waflib.Task import Task from waflib.Errors import WafError from waflib.TaskGen import feature, after_method class gen_sym(Task): def run(self): obj = self.inputs[0] kw = {} reg = getattr(self.generator, 'export_symbols_regex', '.+?') if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME): re_nm = re.compile(r'External\s+\|\s+_(?P%s)\b' % reg) cmd = (self.env.DUMPBIN or ['dumpbin']) + ['/symbols', obj.abspath()] else: if self.env.DEST_BINFMT == 'pe': #gcc uses nm, and has a preceding _ on windows re_nm = re.compile(r'(T|D)\s+_(?P%s)\b' % reg) elif self.env.DEST_BINFMT=='mac-o': re_nm=re.compile(r'(T|D)\s+(?P_?(%s))\b' % reg) else: re_nm = re.compile(r'(T|D)\s+(?P%s)\b' % reg) cmd = (self.env.NM or ['nm']) + ['-g', obj.abspath()] syms = [m.group('symbol') for m in re_nm.finditer(self.generator.bld.cmd_and_log(cmd, quiet=STDOUT, **kw))] self.outputs[0].write('%r' % syms) class compile_sym(Task): def run(self): syms = {} for x in self.inputs: slist = eval(x.read()) for s in slist: syms[s] = 1 lsyms = list(syms.keys()) lsyms.sort() if self.env.DEST_BINFMT == 'pe': self.outputs[0].write('EXPORTS\n' + '\n'.join(lsyms)) elif self.env.DEST_BINFMT == 'elf': self.outputs[0].write('{ global:\n' + ';\n'.join(lsyms) + ";\nlocal: *; };\n") elif self.env.DEST_BINFMT=='mac-o': self.outputs[0].write('\n'.join(lsyms) + '\n') else: raise WafError('NotImplemented') @feature('syms') @after_method('process_source', 'process_use', 'apply_link', 'process_uselib_local', 'propagate_uselib_vars') def do_the_symbol_stuff(self): def_node = self.path.find_or_declare(getattr(self, 'sym_file', self.target + '.def')) compiled_tasks = getattr(self, 'compiled_tasks', None) if compiled_tasks: ins = [x.outputs[0] for x in compiled_tasks] self.gen_sym_tasks = [self.create_task('gen_sym', x, x.change_ext('.%d.sym' % self.idx)) for x in ins] self.create_task('compile_sym', [x.outputs[0] for x in self.gen_sym_tasks], def_node) link_task = getattr(self, 'link_task', None) if link_task: self.link_task.dep_nodes.append(def_node) if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME): self.link_task.env.append_value('LINKFLAGS', ['/def:' + def_node.bldpath()]) elif self.env.DEST_BINFMT == 'pe': # gcc on windows takes *.def as an additional input self.link_task.inputs.append(def_node) elif self.env.DEST_BINFMT == 'elf': self.link_task.env.append_value('LINKFLAGS', ['-Wl,-version-script', '-Wl,' + def_node.bldpath()]) elif self.env.DEST_BINFMT=='mac-o': self.link_task.env.append_value('LINKFLAGS',['-Wl,-exported_symbols_list,' + def_node.bldpath()]) else: raise WafError('NotImplemented') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/ticgt.py0000660000000000000000000002240200000000000022377 0ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Texas Instruments code generator support (experimental) # When reporting issues, please directly assign the bug to the maintainer. __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2012" """ TI cgt6x is a compiler suite for TI DSPs. The toolchain does pretty weird things, and I'm sure I'm missing some of them. But still, the tool saves time. 
What this tool does is: - create a TI compiler environment - create TI compiler features, to handle some specifics about this compiler It has a few idiosyncracies, such as not giving the liberty of the .o file names - automatically activate them when using the TI compiler - handle the tconf tool The tool TODO: - the set_platform_flags() function is not nice - more tests - broaden tool scope, if needed """ import os, re from waflib import Options, Utils, Task, TaskGen from waflib.Tools import c, ccroot, c_preproc from waflib.Configure import conf from waflib.TaskGen import feature, before_method from waflib.Tools.c import cprogram opj = os.path.join @conf def find_ticc(conf): conf.find_program(['cl6x'], var='CC', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) conf.env.CC_NAME = 'ticc' @conf def find_tild(conf): conf.find_program(['lnk6x'], var='LINK_CC', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) conf.env.LINK_CC_NAME = 'tild' @conf def find_tiar(conf): conf.find_program(['ar6x'], var='AR', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) conf.env.AR_NAME = 'tiar' conf.env.ARFLAGS = 'qru' @conf def ticc_common_flags(conf): v = conf.env if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = [] v['CCLNK_TGT_F'] = ['-o'] v['CPPPATH_ST'] = '-I%s' v['DEFINES_ST'] = '-d%s' v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-i%s' # template for adding libpaths v['STLIB_ST'] = '-l=%s.lib' v['STLIBPATH_ST'] = '-i%s' # program v['cprogram_PATTERN'] = '%s.out' # static lib #v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic'] v['cstlib_PATTERN'] = '%s.lib' def configure(conf): v = conf.env v.TI_CGT_DIR = getattr(Options.options, 'ti-cgt-dir', "") v.TI_DSPLINK_DIR = getattr(Options.options, 'ti-dsplink-dir', "") v.TI_BIOSUTILS_DIR = getattr(Options.options, 'ti-biosutils-dir', "") v.TI_DSPBIOS_DIR = getattr(Options.options, 'ti-dspbios-dir', "") v.TI_XDCTOOLS_DIR = getattr(Options.options, 'ti-xdctools-dir', "") conf.find_ticc() conf.find_tiar() conf.find_tild() conf.ticc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() conf.find_program(['tconf'], var='TCONF', path_list=v.TI_XDCTOOLS_DIR) conf.env.TCONF_INCLUDES += [ opj(conf.env.TI_DSPBIOS_DIR, 'packages'), ] conf.env.INCLUDES += [ opj(conf.env.TI_CGT_DIR, 'include'), ] conf.env.LIBPATH += [ opj(conf.env.TI_CGT_DIR, "lib"), ] conf.env.INCLUDES_DSPBIOS += [ opj(conf.env.TI_DSPBIOS_DIR, 'packages', 'ti', 'bios', 'include'), ] conf.env.LIBPATH_DSPBIOS += [ opj(conf.env.TI_DSPBIOS_DIR, 'packages', 'ti', 'bios', 'lib'), ] conf.env.INCLUDES_DSPLINK += [ opj(conf.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc'), ] @conf def ti_set_debug(cfg, debug=1): """ Sets debug flags for the compiler. TODO: - for each TI CFLAG/INCLUDES/LINKFLAGS/LIBPATH replace RELEASE by DEBUG - -g --no_compress """ if debug: cfg.env.CFLAGS += "-d_DEBUG -dDEBUG -dDDSP_DEBUG".split() @conf def ti_dsplink_set_platform_flags(cfg, splat, dsp, dspbios_ver, board): """ Sets the INCLUDES, LINKFLAGS for DSPLINK and TCONF_INCLUDES For the specific hardware. Assumes that DSPLINK was built in its own folder. :param splat: short platform name (eg. OMAPL138) :param dsp: DSP name (eg. 674X) :param dspbios_ver: string identifying DspBios version (eg. 5.XX) :param board: board name (eg. 
OMAPL138GEM) """ d1 = opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', 'DspBios', dspbios_ver) d = opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', 'DspBios', dspbios_ver, board) cfg.env.TCONF_INCLUDES += [d1, d] cfg.env.INCLUDES_DSPLINK += [ opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', dsp), d, ] cfg.env.LINKFLAGS_DSPLINK += [ opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'export', 'BIN', 'DspBios', splat, board+'_0', 'RELEASE', 'dsplink%s.lib' % x) for x in ('', 'pool', 'mpcs', 'mplist', 'msg', 'data', 'notify', 'ringio') ] def options(opt): opt.add_option('--with-ti-cgt', type='string', dest='ti-cgt-dir', help = 'Specify alternate cgt root folder', default="") opt.add_option('--with-ti-biosutils', type='string', dest='ti-biosutils-dir', help = 'Specify alternate biosutils folder', default="") opt.add_option('--with-ti-dspbios', type='string', dest='ti-dspbios-dir', help = 'Specify alternate dspbios folder', default="") opt.add_option('--with-ti-dsplink', type='string', dest='ti-dsplink-dir', help = 'Specify alternate dsplink folder', default="") opt.add_option('--with-ti-xdctools', type='string', dest='ti-xdctools-dir', help = 'Specify alternate xdctools folder', default="") class ti_cprogram(cprogram): """ Link object files into a c program Changes: - the linked executable to have a relative path (because we can) - put the LIBPATH first """ run_str = '${LINK_CC} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].bldpath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ' @feature("c") @before_method('apply_link') def use_ti_cprogram(self): """ Automatically uses ti_cprogram link process """ if 'cprogram' in self.features and self.env.CC_NAME == 'ticc': self.features.insert(0, "ti_cprogram") class ti_c(Task.Task): """ Compile task for the TI codegen compiler This compiler does not allow specifying the output file name, only the output path. 
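The output directory is therefore passed via the compiler's -fr option, which create_compiled_task below stores in env.OUT; a resulting command looks roughly like this (paths, include directories and defines are illustrative)::

    cl6x -Iinclude -dDEBUG foo.c -c -fr/path/to/build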
""" "Compile C files into object files" run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${SRC} -c ${OUT} ${CPPFLAGS}' vars = ['CCDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = c_preproc.scan def create_compiled_task(self, name, node): """ Overrides ccroot.create_compiled_task to support ti_c """ out = '%s' % (node.change_ext('.obj').name) if self.env.CC_NAME == 'ticc': name = 'ti_c' task = self.create_task(name, node, node.parent.find_or_declare(out)) self.env.OUT = '-fr%s' % (node.parent.get_bld().abspath()) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @TaskGen.extension('.c') def c_hook(self, node): "Bind the c file extension to the creation of a :py:class:`waflib.Tools.c.c` instance" if self.env.CC_NAME == 'ticc': return create_compiled_task(self, 'ti_c', node) else: return self.create_compiled_task('c', node) @feature("ti-tconf") @before_method('process_source') def apply_tconf(self): sources = [x.get_src() for x in self.to_nodes(self.source, path=self.path.get_src())] node = sources[0] assert(sources[0].name.endswith(".tcf")) if len(sources) > 1: assert(sources[1].name.endswith(".cmd")) target = getattr(self, 'target', self.source) target_node = node.get_bld().parent.find_or_declare(node.name) procid = "%d" % int(getattr(self, 'procid', 0)) importpaths = [] includes = Utils.to_list(getattr(self, 'includes', [])) for x in includes + self.env.TCONF_INCLUDES: if x == os.path.abspath(x): importpaths.append(x) else: relpath = self.path.find_node(x).path_from(target_node.parent) importpaths.append(relpath) task = self.create_task('ti_tconf', sources, target_node.change_ext('.cdb')) task.path = self.path task.includes = includes task.cwd = target_node.parent.abspath() task.env = self.env.derive() task.env["TCONFSRC"] = node.path_from(target_node.parent) task.env["TCONFINC"] = '-Dconfig.importPath=%s' % ";".join(importpaths) task.env['TCONFPROGNAME'] = '-Dconfig.programName=%s' % target task.env['PROCID'] = procid task.outputs = [ target_node.change_ext("cfg_c.c"), target_node.change_ext("cfg.s62"), target_node.change_ext("cfg.cmd"), ] create_compiled_task(self, 'ti_c', task.outputs[1]) ctask = create_compiled_task(self, 'ti_c', task.outputs[0]) ctask.env = self.env.derive() self.add_those_o_files(target_node.change_ext("cfg.cmd")) if len(sources) > 1: self.add_those_o_files(sources[1]) self.source = [] re_tconf_include = re.compile(r'(?Putils\.importFile)\("(?P.*)"\)',re.M) class ti_tconf(Task.Task): run_str = '${TCONF} ${TCONFINC} ${TCONFPROGNAME} ${TCONFSRC} ${PROCID}' color = 'PINK' def scan(self): includes = Utils.to_list(getattr(self, 'includes', [])) def deps(node): nodes, names = [], [] if node: code = Utils.readf(node.abspath()) for match in re_tconf_include.finditer(code): path = match.group('file') if path: for x in includes: filename = opj(x, path) fi = self.path.find_resource(filename) if fi: subnodes, subnames = deps(fi) nodes += subnodes names += subnames nodes.append(fi) names.append(path) break return nodes, names return deps(self.inputs[0]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/unity.py0000660000000000000000000000547000000000000022443 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 """ Compile whole groups of C/C++ files at once (C and C++ files are processed independently though). To enable globally:: def options(opt): opt.load('compiler_cxx') def build(bld): bld.load('compiler_cxx unity') To enable for specific task generators only:: def build(bld): bld(features='c cprogram unity', source='main.c', ...) The file order is often significant in such builds, so it can be necessary to adjust the order of source files and the batch sizes. To control the amount of files processed in a batch per target (the default is 50):: def build(bld): bld(features='c cprogram', unity_size=20) """ from waflib import Task, Options from waflib.Tools import c_preproc from waflib import TaskGen MAX_BATCH = 50 EXTS_C = ('.c',) EXTS_CXX = ('.cpp','.cc','.cxx','.C','.c++') def options(opt): global MAX_BATCH opt.add_option('--batchsize', action='store', dest='batchsize', type='int', default=MAX_BATCH, help='default unity batch size (0 disables unity builds)') @TaskGen.taskgen_method def batch_size(self): default = getattr(Options.options, 'batchsize', MAX_BATCH) if default < 1: return 0 return getattr(self, 'unity_size', default) class unity(Task.Task): color = 'BLUE' scan = c_preproc.scan def to_include(self, node): ret = node.path_from(self.outputs[0].parent) ret = ret.replace('\\', '\\\\').replace('"', '\\"') return ret def run(self): lst = ['#include "%s"\n' % self.to_include(node) for node in self.inputs] txt = ''.join(lst) self.outputs[0].write(txt) def __str__(self): node = self.outputs[0] return node.path_from(node.ctx.launch_node()) def bind_unity(obj, cls_name, exts): if not 'mappings' in obj.__dict__: obj.mappings = dict(obj.mappings) for j in exts: fun = obj.mappings[j] if fun.__name__ == 'unity_fun': raise ValueError('Attempt to bind unity mappings multiple times %r' % j) def unity_fun(self, node): cnt = self.batch_size() if cnt <= 1: return fun(self, node) x = getattr(self, 'master_%s' % cls_name, None) if not x or len(x.inputs) >= cnt: x = self.create_task('unity') setattr(self, 'master_%s' % cls_name, x) cnt_cur = getattr(self, 'cnt_%s' % cls_name, 0) c_node = node.parent.find_or_declare('unity_%s_%d_%d.%s' % (self.idx, cnt_cur, cnt, cls_name)) x.outputs = [c_node] setattr(self, 'cnt_%s' % cls_name, cnt_cur + 1) fun(self, c_node) x.inputs.append(node) obj.mappings[j] = unity_fun @TaskGen.feature('unity') @TaskGen.before('process_source') def single_unity(self): lst = self.to_list(self.features) if 'c' in lst: bind_unity(self, 'c', EXTS_C) if 'cxx' in lst: bind_unity(self, 'cxx', EXTS_CXX) def build(bld): if bld.env.CC_NAME: bind_unity(TaskGen.task_gen, 'c', EXTS_C) if bld.env.CXX_NAME: bind_unity(TaskGen.task_gen, 'cxx', EXTS_CXX) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/use_config.py0000660000000000000000000001303100000000000023404 0ustar00rootroot00000000000000#!/usr/bin/env python # coding=utf-8 # Mathieu Courtois - EDF R&D, 2013 - http://www.code-aster.org """ When a project has a lot of options the 'waf configure' command line can be very long and it becomes a cause of error. This tool provides a convenient way to load a set of configuration parameters from a local file or from a remote url. The configuration parameters are stored in a Python file that is imported as an extra waf tool can be. Example: $ waf configure --use-config-dir=http://www.anywhere.org --use-config=myconf1 ... 
The file 'myconf1' will be downloaded from 'http://www.anywhere.org' (or 'http://www.anywhere.org/wafcfg'). If the files are available locally, it could be: $ waf configure --use-config-dir=/somewhere/myconfigurations --use-config=myconf1 ... The configuration of 'myconf1.py' is automatically loaded by calling its 'configure' function. In this example, it defines environment variables and set options: def configure(self): self.env['CC'] = 'gcc-4.8' self.env.append_value('LIBPATH', [...]) self.options.perlbinary = '/usr/local/bin/perl' self.options.pyc = False The corresponding command line should have been: $ CC=gcc-4.8 LIBPATH=... waf configure --nopyc --with-perl-binary=/usr/local/bin/perl This is an extra tool, not bundled with the default waf binary. To add the use_config tool to the waf file: $ ./waf-light --tools=use_config When using this tool, the wscript will look like: def options(opt): opt.load('use_config') def configure(conf): conf.load('use_config') """ import sys import os.path as osp import os local_repo = '' """Local repository containing additional Waf tools (plugins)""" remote_repo = 'https://gitlab.com/ita1024/waf/raw/master/' """ Remote directory containing downloadable waf tools. The missing tools can be downloaded by using:: $ waf configure --download """ remote_locs = ['waflib/extras', 'waflib/Tools'] """ Remote directories for use with :py:const:`waflib.extras.use_config.remote_repo` """ try: from urllib import request except ImportError: from urllib import urlopen else: urlopen = request.urlopen from waflib import Errors, Context, Logs, Utils, Options, Configure try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse DEFAULT_DIR = 'wafcfg' # add first the current wafcfg subdirectory sys.path.append(osp.abspath(DEFAULT_DIR)) def options(self): group = self.add_option_group('configure options') group.add_option('--download', dest='download', default=False, action='store_true', help='try to download the tools if missing') group.add_option('--use-config', action='store', default=None, metavar='CFG', dest='use_config', help='force the configuration parameters by importing ' 'CFG.py. Several modules may be provided (comma ' 'separated).') group.add_option('--use-config-dir', action='store', default=DEFAULT_DIR, metavar='CFG_DIR', dest='use_config_dir', help='path or url where to find the configuration file') def download_check(node): """ Hook to check for the tools which are downloaded. Replace with your function if necessary. 
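For example, a stricter hook could verify a checksum before accepting the downloaded
file (sketch only; the expected digests are placeholders you would maintain yourself)::

    import hashlib
    from waflib import Errors

    EXPECTED_MD5 = {}  # e.g. {'swig.py': '<md5 of the approved copy>'}

    def download_check(node):
        digest = hashlib.md5(node.read('rb')).hexdigest()
        if EXPECTED_MD5 and EXPECTED_MD5.get(node.name) != digest:
            raise Errors.WafError('checksum mismatch for downloaded tool %r' % node.name)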
""" pass def download_tool(tool, force=False, ctx=None): """ Download a Waf tool from the remote repository defined in :py:const:`waflib.extras.use_config.remote_repo`:: $ waf configure --download """ for x in Utils.to_list(remote_repo): for sub in Utils.to_list(remote_locs): url = '/'.join((x, sub, tool + '.py')) try: web = urlopen(url) try: if web.getcode() != 200: continue except AttributeError: pass except Exception: # on python3 urlopen throws an exception # python 2.3 does not have getcode and throws an exception to fail continue else: tmp = ctx.root.make_node(os.sep.join((Context.waf_dir, 'waflib', 'extras', tool + '.py'))) tmp.write(web.read(), 'wb') Logs.warn('Downloaded %s from %s', tool, url) download_check(tmp) try: module = Context.load_tool(tool) except Exception: Logs.warn('The tool %s from %s is unusable', tool, url) try: tmp.delete() except Exception: pass continue return module raise Errors.WafError('Could not load the Waf tool') def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True): try: module = Context.load_tool_default(tool, tooldir, ctx, with_sys_path) except ImportError as e: if not ctx or not hasattr(Options.options, 'download'): Logs.error('Could not load %r during options phase (download unavailable at this point)' % tool) raise if Options.options.download: module = download_tool(tool, ctx=ctx) if not module: ctx.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e)) else: ctx.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s' % (tool, sys.path, e)) return module Context.load_tool_default = Context.load_tool Context.load_tool = load_tool Configure.download_tool = download_tool def configure(self): opts = self.options use_cfg = opts.use_config if use_cfg is None: return url = urlparse(opts.use_config_dir) kwargs = {} if url.scheme: kwargs['download'] = True kwargs['remote_url'] = url.geturl() # search first with the exact url, else try with +'/wafcfg' kwargs['remote_locs'] = ['', DEFAULT_DIR] tooldir = url.geturl() + ' ' + DEFAULT_DIR for cfg in use_cfg.split(','): Logs.pprint('NORMAL', "Searching configuration '%s'..." % cfg) self.load(cfg, tooldir=tooldir, **kwargs) self.start_msg('Checking for configuration') self.end_msg(use_cfg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/valadoc.py0000660000000000000000000001053500000000000022702 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Nicolas Joseph 2009 """ ported from waf 1.5: TODO: tabs vs spaces """ from waflib import Task, Utils, Errors, Logs from waflib.TaskGen import feature VALADOC_STR = '${VALADOC}' class valadoc(Task.Task): vars = ['VALADOC', 'VALADOCFLAGS'] color = 'BLUE' after = ['cprogram', 'cstlib', 'cshlib', 'cxxprogram', 'cxxstlib', 'cxxshlib'] quiet = True # no outputs .. 
this is weird def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.output_dir = '' self.doclet = '' self.package_name = '' self.package_version = '' self.files = [] self.vapi_dirs = [] self.protected = True self.private = False self.inherit = False self.deps = False self.vala_defines = [] self.vala_target_glib = None self.enable_non_null_experimental = False self.force = False def run(self): if not self.env['VALADOCFLAGS']: self.env['VALADOCFLAGS'] = '' cmd = [Utils.subst_vars(VALADOC_STR, self.env)] cmd.append ('-o %s' % self.output_dir) if getattr(self, 'doclet', None): cmd.append ('--doclet %s' % self.doclet) cmd.append ('--package-name %s' % self.package_name) if getattr(self, 'package_version', None): cmd.append ('--package-version %s' % self.package_version) if getattr(self, 'packages', None): for package in self.packages: cmd.append ('--pkg %s' % package) if getattr(self, 'vapi_dirs', None): for vapi_dir in self.vapi_dirs: cmd.append ('--vapidir %s' % vapi_dir) if not getattr(self, 'protected', None): cmd.append ('--no-protected') if getattr(self, 'private', None): cmd.append ('--private') if getattr(self, 'inherit', None): cmd.append ('--inherit') if getattr(self, 'deps', None): cmd.append ('--deps') if getattr(self, 'vala_defines', None): for define in self.vala_defines: cmd.append ('--define %s' % define) if getattr(self, 'vala_target_glib', None): cmd.append ('--target-glib=%s' % self.vala_target_glib) if getattr(self, 'enable_non_null_experimental', None): cmd.append ('--enable-non-null-experimental') if getattr(self, 'force', None): cmd.append ('--force') cmd.append (' '.join ([x.abspath() for x in self.files])) return self.generator.bld.exec_command(' '.join(cmd)) @feature('valadoc') def process_valadoc(self): """ Generate API documentation from Vala source code with valadoc doc = bld( features = 'valadoc', output_dir = '../doc/html', package_name = 'vala-gtk-example', package_version = '1.0.0', packages = 'gtk+-2.0', vapi_dirs = '../vapi', force = True ) path = bld.path.find_dir ('../src') doc.files = path.ant_glob (incl='**/*.vala') """ task = self.create_task('valadoc') if getattr(self, 'output_dir', None): task.output_dir = self.path.find_or_declare(self.output_dir).abspath() else: Errors.WafError('no output directory') if getattr(self, 'doclet', None): task.doclet = self.doclet else: Errors.WafError('no doclet directory') if getattr(self, 'package_name', None): task.package_name = self.package_name else: Errors.WafError('no package name') if getattr(self, 'package_version', None): task.package_version = self.package_version if getattr(self, 'packages', None): task.packages = Utils.to_list(self.packages) if getattr(self, 'vapi_dirs', None): vapi_dirs = Utils.to_list(self.vapi_dirs) for vapi_dir in vapi_dirs: try: task.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath()) except AttributeError: Logs.warn('Unable to locate Vala API directory: %r', vapi_dir) if getattr(self, 'files', None): task.files = self.files else: Errors.WafError('no input file') if getattr(self, 'protected', None): task.protected = self.protected if getattr(self, 'private', None): task.private = self.private if getattr(self, 'inherit', None): task.inherit = self.inherit if getattr(self, 'deps', None): task.deps = self.deps if getattr(self, 'vala_defines', None): task.vala_defines = Utils.to_list(self.vala_defines) if getattr(self, 'vala_target_glib', None): task.vala_target_glib = self.vala_target_glib if getattr(self, 'enable_non_null_experimental', None): 
task.enable_non_null_experimental = self.enable_non_null_experimental if getattr(self, 'force', None): task.force = self.force def configure(conf): conf.find_program('valadoc', errmsg='You must install valadoc for generate the API documentation') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/waf_xattr.py0000660000000000000000000001006000000000000023261 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Use extended attributes instead of database files 1. Input files will be made writable 2. This is only for systems providing extended filesystem attributes 3. By default, hashes are calculated only if timestamp/size change (HASH_CACHE below) 4. The module enables "deep_inputs" on all tasks by propagating task signatures 5. This module also skips task signature comparisons for task code changes due to point 4. 6. This module is for Python3/Linux only, but it could be extended to Python2/other systems using the xattr library 7. For projects in which tasks always declare output files, it should be possible to store the rest of build context attributes on output files (imp_sigs, raw_deps and node_deps) but this is not done here On a simple C++ project benchmark, the variations before and after adding waf_xattr.py were observed: total build time: 20s -> 22s no-op build time: 2.4s -> 1.8s pickle file size: 2.9MB -> 2.6MB """ import os from waflib import Logs, Node, Task, Utils, Errors from waflib.Task import SKIP_ME, RUN_ME, CANCEL_ME, ASK_LATER, SKIPPED, MISSING HASH_CACHE = True SIG_VAR = 'user.waf.sig' SEP = ','.encode() TEMPLATE = '%b%d,%d'.encode() try: PermissionError except NameError: PermissionError = IOError def getxattr(self): return os.getxattr(self.abspath(), SIG_VAR) def setxattr(self, val): os.setxattr(self.abspath(), SIG_VAR, val) def h_file(self): try: ret = getxattr(self) except OSError: if HASH_CACHE: st = os.stat(self.abspath()) mtime = st.st_mtime size = st.st_size else: if len(ret) == 16: # for build directory files return ret if HASH_CACHE: # check if timestamp and mtime match to avoid re-hashing st = os.stat(self.abspath()) mtime, size = ret[16:].split(SEP) if int(1000 * st.st_mtime) == int(mtime) and st.st_size == int(size): return ret[:16] ret = Utils.h_file(self.abspath()) if HASH_CACHE: val = TEMPLATE % (ret, int(1000 * st.st_mtime), int(st.st_size)) try: setxattr(self, val) except PermissionError: os.chmod(self.abspath(), st.st_mode | 128) setxattr(self, val) return ret def runnable_status(self): bld = self.generator.bld if bld.is_install < 0: return SKIP_ME for t in self.run_after: if not t.hasrun: return ASK_LATER elif t.hasrun < SKIPPED: # a dependency has an error return CANCEL_ME # first compute the signature try: new_sig = self.signature() except Errors.TaskNotReady: return ASK_LATER if not self.outputs: # compare the signature to a signature computed previously # this part is only for tasks with no output files key = self.uid() try: prev_sig = bld.task_sigs[key] except KeyError: Logs.debug('task: task %r must run: it was never run before or the task code changed', self) return RUN_ME if new_sig != prev_sig: Logs.debug('task: task %r must run: the task signature changed', self) return RUN_ME # compare the signatures of the outputs to make a decision for node in self.outputs: try: sig = node.h_file() except EnvironmentError: Logs.debug('task: task %r must run: an output node does not exist', self) return RUN_ME if sig != new_sig: 
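# (explanatory note added here, not part of the original tool) node.h_file() above returns the
# signature that post_run() stored in the user.waf.sig extended attribute; if it no longer matches
# the signature just computed, the output was produced from different inputs or settings, so the
# task is stale and must run again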
Logs.debug('task: task %r must run: an output node is stale', self) return RUN_ME return (self.always_run and RUN_ME) or SKIP_ME def post_run(self): bld = self.generator.bld sig = self.signature() for node in self.outputs: if not node.exists(): self.hasrun = MISSING self.err_msg = '-> missing file: %r' % node.abspath() raise Errors.WafError(self.err_msg) os.setxattr(node.abspath(), 'user.waf.sig', sig) if not self.outputs: # only for task with no outputs bld.task_sigs[self.uid()] = sig if not self.keep_last_cmd: try: del self.last_cmd except AttributeError: pass try: os.getxattr except AttributeError: pass else: h_file.__doc__ = Node.Node.h_file.__doc__ # keep file hashes as file attributes Node.Node.h_file = h_file # enable "deep_inputs" on all tasks Task.Task.runnable_status = runnable_status Task.Task.post_run = post_run Task.Task.sig_deep_inputs = Utils.nada ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/wafcache.py0000660000000000000000000003622300000000000023034 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2019 (ita) """ Filesystem-based cache system to share and re-use build artifacts Cache access operations (copy to and from) are delegated to independent pre-forked worker subprocesses. The following environment variables may be set: * WAFCACHE: several possibilities: - File cache: absolute path of the waf cache (~/.cache/wafcache_user, where `user` represents the currently logged-in user) - URL to a cache server, for example: export WAFCACHE=http://localhost:8080/files/ in that case, GET/POST requests are made to urls of the form http://localhost:8080/files/000000000/0 (cache management is delegated to the server) - GCS, S3 or MINIO bucket gs://my-bucket/ (uses gsutil command line tool or WAFCACHE_CMD) s3://my-bucket/ (uses aws command line tool or WAFCACHE_CMD) minio://my-bucket/ (uses mc command line tool or WAFCACHE_CMD) * WAFCACHE_CMD: bucket upload/download command, for example: WAFCACHE_CMD="gsutil cp %{SRC} %{TGT}" Note that the WAFCACHE bucket value is used for the source or destination depending on the operation (upload or download). For example, with: WAFCACHE="gs://mybucket/" the following commands may be run: gsutil cp build/myprogram gs://mybucket/aa/aaaaa/1 gsutil cp gs://mybucket/bb/bbbbb/2 build/somefile * WAFCACHE_NO_PUSH: if set, disables pushing to the cache * WAFCACHE_VERBOSITY: if set, displays more detailed cache operations File cache specific options: Files are copied using hard links by default; if the cache is located onto another partition, the system switches to file copies instead. * WAFCACHE_TRIM_MAX_FOLDER: maximum amount of tasks to cache (1M) * WAFCACHE_EVICT_MAX_BYTES: maximum amount of cache size in bytes (10GB) * WAFCACHE_EVICT_INTERVAL_MINUTES: minimum time interval to try and trim the cache (3 minutess) Usage:: def build(bld): bld.load('wafcache') ... 
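With the default file cache, each task's outputs are stored under a path derived from the
task uid and its build signature (layout sketch, hash shortened for readability)::

    <WAFCACHE>/<first two hex characters>/<full hex signature>/<output index>
    e.g. ~/.cache/wafcache_user/0b/0b180f82246d...bfc0ab9/0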
To troubleshoot:: waf clean build --zones=wafcache """ import atexit, base64, errno, fcntl, getpass, os, re, shutil, sys, time, traceback, urllib3, shlex try: import subprocess32 as subprocess except ImportError: import subprocess base_cache = os.path.expanduser('~/.cache/') if not os.path.isdir(base_cache): base_cache = '/tmp/' default_wafcache_dir = os.path.join(base_cache, 'wafcache_' + getpass.getuser()) CACHE_DIR = os.environ.get('WAFCACHE', default_wafcache_dir) WAFCACHE_CMD = os.environ.get('WAFCACHE_CMD') TRIM_MAX_FOLDERS = int(os.environ.get('WAFCACHE_TRIM_MAX_FOLDER', 1000000)) EVICT_INTERVAL_MINUTES = int(os.environ.get('WAFCACHE_EVICT_INTERVAL_MINUTES', 3)) EVICT_MAX_BYTES = int(os.environ.get('WAFCACHE_EVICT_MAX_BYTES', 10**10)) WAFCACHE_NO_PUSH = 1 if os.environ.get('WAFCACHE_NO_PUSH') else 0 WAFCACHE_VERBOSITY = 1 if os.environ.get('WAFCACHE_VERBOSITY') else 0 OK = "ok" re_waf_cmd = re.compile('(?P%{SRC})|(?P%{TGT})') try: import cPickle except ImportError: import pickle as cPickle if __name__ != '__main__': from waflib import Task, Logs, Utils, Build def can_retrieve_cache(self): """ New method for waf Task classes """ if not self.outputs: return False self.cached = False sig = self.signature() ssig = Utils.to_hex(self.uid() + sig) files_to = [node.abspath() for node in self.outputs] err = cache_command(ssig, [], files_to) if err.startswith(OK): if WAFCACHE_VERBOSITY: Logs.pprint('CYAN', ' Fetched %r from cache' % files_to) else: Logs.debug('wafcache: fetched %r from cache', files_to) else: if WAFCACHE_VERBOSITY: Logs.pprint('YELLOW', ' No cache entry %s' % files_to) else: Logs.debug('wafcache: No cache entry %s: %s', files_to, err) return False self.cached = True return True def put_files_cache(self): """ New method for waf Task classes """ if WAFCACHE_NO_PUSH or getattr(self, 'cached', None) or not self.outputs: return bld = self.generator.bld sig = self.signature() ssig = Utils.to_hex(self.uid() + sig) files_from = [node.abspath() for node in self.outputs] err = cache_command(ssig, files_from, []) if err.startswith(OK): if WAFCACHE_VERBOSITY: Logs.pprint('CYAN', ' Successfully uploaded %s to cache' % files_from) else: Logs.debug('wafcache: Successfully uploaded %r to cache', files_from) else: if WAFCACHE_VERBOSITY: Logs.pprint('RED', ' Error caching step results %s: %s' % (files_from, err)) else: Logs.debug('wafcache: Error caching results %s: %s', files_from, err) bld.task_sigs[self.uid()] = self.cache_sig def hash_env_vars(self, env, vars_lst): """ Reimplement BuildContext.hash_env_vars so that the resulting hash does not depend on local paths """ if not env.table: env = env.parent if not env: return Utils.SIG_NIL idx = str(id(env)) + str(vars_lst) try: cache = self.cache_env except AttributeError: cache = self.cache_env = {} else: try: return self.cache_env[idx] except KeyError: pass v = str([env[a] for a in vars_lst]) v = v.replace(self.srcnode.abspath().__repr__()[:-1], '') m = Utils.md5() m.update(v.encode()) ret = m.digest() Logs.debug('envhash: %r %r', ret, v) cache[idx] = ret return ret def uid(self): """ Reimplement Task.uid() so that the signature does not depend on local paths """ try: return self.uid_ except AttributeError: m = Utils.md5() src = self.generator.bld.srcnode up = m.update up(self.__class__.__name__.encode()) for x in self.inputs + self.outputs: up(x.path_from(src).encode()) self.uid_ = m.digest() return self.uid_ def make_cached(cls): """ Enable the waf cache for a given task class """ if getattr(cls, 'nocache', None) or getattr(cls, 
'has_cache', False): return m1 = getattr(cls, 'run', None) def run(self): if getattr(self, 'nocache', False): return m1(self) if self.can_retrieve_cache(): return 0 return m1(self) cls.run = run m2 = getattr(cls, 'post_run', None) def post_run(self): if getattr(self, 'nocache', False): return m2(self) ret = m2(self) self.put_files_cache() if hasattr(self, 'chmod'): for node in self.outputs: os.chmod(node.abspath(), self.chmod) return ret cls.post_run = post_run cls.has_cache = True process_pool = [] def get_process(): """ Returns a worker process that can process waf cache commands The worker process is assumed to be returned to the process pool when unused """ try: return process_pool.pop() except IndexError: filepath = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'wafcache.py' cmd = [sys.executable, '-c', Utils.readf(filepath)] return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0) def atexit_pool(): for k in process_pool: try: os.kill(k.pid, 9) except OSError: pass else: k.wait() atexit.register(atexit_pool) def build(bld): """ Called during the build process to enable file caching """ if process_pool: # already called once return # pre-allocation processes = [get_process() for x in range(bld.jobs)] process_pool.extend(processes) Task.Task.can_retrieve_cache = can_retrieve_cache Task.Task.put_files_cache = put_files_cache Task.Task.uid = uid Build.BuildContext.hash_env_vars = hash_env_vars for x in reversed(list(Task.classes.values())): make_cached(x) def cache_command(sig, files_from, files_to): """ Create a command for cache worker processes, returns a pickled base64-encoded tuple containing the task signature, a list of files to cache and a list of files files to get from cache (one of the lists is assumed to be empty) """ proc = get_process() obj = base64.b64encode(cPickle.dumps([sig, files_from, files_to])) proc.stdin.write(obj) proc.stdin.write('\n'.encode()) proc.stdin.flush() obj = proc.stdout.readline() if not obj: raise OSError('Preforked sub-process %r died' % proc.pid) process_pool.append(proc) return cPickle.loads(base64.b64decode(obj)) try: copyfun = os.link except NameError: copyfun = shutil.copy2 def atomic_copy(orig, dest): """ Copy files to the cache, the operation is atomic for a given file """ global copyfun tmp = dest + '.tmp' up = os.path.dirname(dest) try: os.makedirs(up) except OSError: pass try: copyfun(orig, tmp) except OSError as e: if e.errno == errno.EXDEV: copyfun = shutil.copy2 copyfun(orig, tmp) else: raise os.rename(tmp, dest) def lru_trim(): """ the cache folders take the form: `CACHE_DIR/0b/0b180f82246d726ece37c8ccd0fb1cde2650d7bfcf122ec1f169079a3bfc0ab9` they are listed in order of last access, and then removed until the amount of folders is within TRIM_MAX_FOLDERS and the total space taken by files is less than EVICT_MAX_BYTES """ lst = [] for up in os.listdir(CACHE_DIR): if len(up) == 2: sub = os.path.join(CACHE_DIR, up) for hval in os.listdir(sub): path = os.path.join(sub, hval) size = 0 for fname in os.listdir(path): size += os.lstat(os.path.join(path, fname)).st_size lst.append((os.stat(path).st_mtime, size, path)) lst.sort(key=lambda x: x[0]) lst.reverse() tot = sum(x[1] for x in lst) while tot > EVICT_MAX_BYTES or len(lst) > TRIM_MAX_FOLDERS: _, tmp_size, path = lst.pop() tot -= tmp_size tmp = path + '.tmp' try: shutil.rmtree(tmp) except OSError: pass try: os.rename(path, tmp) except OSError: sys.stderr.write('Could not rename %r to %r' % (path, tmp)) else: try: shutil.rmtree(tmp) except OSError: 
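# (explanatory note, not part of the original) the rename above already moved the entry out of its
# <sig[:2]>/<sig> lookup location, so a failed rmtree only leaves an orphaned .tmp directory behind;
# the failure is reported and otherwise ignored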
sys.stderr.write('Could not remove %r' % tmp) sys.stderr.write("Cache trimmed: %r bytes in %r folders left\n" % (tot, len(lst))) def lru_evict(): """ Reduce the cache size """ lockfile = os.path.join(CACHE_DIR, 'all.lock') try: st = os.stat(lockfile) except EnvironmentError as e: if e.errno == errno.ENOENT: with open(lockfile, 'w') as f: f.write('') return else: raise if st.st_mtime < time.time() - EVICT_INTERVAL_MINUTES * 60: # check every EVICT_INTERVAL_MINUTES minutes if the cache is too big # OCLOEXEC is unnecessary because no processes are spawned fd = os.open(lockfile, os.O_RDWR | os.O_CREAT, 0o755) try: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except EnvironmentError: sys.stderr.write('another process is running!\n') pass else: # now dow the actual cleanup lru_trim() os.utime(lockfile, None) finally: os.close(fd) class netcache(object): def __init__(self): self.http = urllib3.PoolManager() def url_of(self, sig, i): return "%s/%s/%s" % (CACHE_DIR, sig, i) def upload(self, file_path, sig, i): url = self.url_of(sig, i) with open(file_path, 'rb') as f: file_data = f.read() r = self.http.request('POST', url, timeout=60, fields={ 'file': ('%s/%s' % (sig, i), file_data), }) if r.status >= 400: raise OSError("Invalid status %r %r" % (url, r.status)) def download(self, file_path, sig, i): url = self.url_of(sig, i) with self.http.request('GET', url, preload_content=False, timeout=60) as inf: if inf.status >= 400: raise OSError("Invalid status %r %r" % (url, inf.status)) with open(file_path, 'wb') as out: shutil.copyfileobj(inf, out) def copy_to_cache(self, sig, files_from, files_to): try: for i, x in enumerate(files_from): if not os.path.islink(x): self.upload(x, sig, i) except Exception: return traceback.format_exc() return OK def copy_from_cache(self, sig, files_from, files_to): try: for i, x in enumerate(files_to): self.download(x, sig, i) except Exception: return traceback.format_exc() return OK class fcache(object): def __init__(self): if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR) if not os.path.exists(CACHE_DIR): raise ValueError('Could not initialize the cache directory') def copy_to_cache(self, sig, files_from, files_to): """ Copy files to the cache, existing files are overwritten, and the copy is atomic only for a given file, not for all files that belong to a given task object """ try: for i, x in enumerate(files_from): dest = os.path.join(CACHE_DIR, sig[:2], sig, str(i)) atomic_copy(x, dest) except Exception: return traceback.format_exc() else: # attempt trimming if caching was successful: # we may have things to trim! lru_evict() return OK def copy_from_cache(self, sig, files_from, files_to): """ Copy files from the cache """ try: for i, x in enumerate(files_to): orig = os.path.join(CACHE_DIR, sig[:2], sig, str(i)) atomic_copy(orig, x) # success! 
update the cache time os.utime(os.path.join(CACHE_DIR, sig[:2], sig), None) except Exception: return traceback.format_exc() return OK class bucket_cache(object): def bucket_copy(self, source, target): if WAFCACHE_CMD: def replacer(match): if match.group('src'): return source elif match.group('tgt'): return target cmd = [re_waf_cmd.sub(replacer, x) for x in shlex.split(WAFCACHE_CMD)] elif CACHE_DIR.startswith('s3://'): cmd = ['aws', 's3', 'cp', source, target] elif CACHE_DIR.startswith('gs://'): cmd = ['gsutil', 'cp', source, target] else: cmd = ['mc', 'cp', source, target] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() if proc.returncode: raise OSError('Error copy %r to %r using: %r (exit %r):\n out:%s\n err:%s' % ( source, target, cmd, proc.returncode, out.decode(), err.decode())) def copy_to_cache(self, sig, files_from, files_to): try: for i, x in enumerate(files_from): dest = os.path.join(CACHE_DIR, sig[:2], sig, str(i)) self.bucket_copy(x, dest) except Exception: return traceback.format_exc() return OK def copy_from_cache(self, sig, files_from, files_to): try: for i, x in enumerate(files_to): orig = os.path.join(CACHE_DIR, sig[:2], sig, str(i)) self.bucket_copy(orig, x) except EnvironmentError: return traceback.format_exc() return OK def loop(service): """ This function is run when this file is run as a standalone python script, it assumes a parent process that will communicate the commands to it as pickled-encoded tuples (one line per command) The commands are to copy files to the cache or copy files from the cache to a target destination """ # one operation is performed at a single time by a single process # therefore stdin never has more than one line txt = sys.stdin.readline().strip() if not txt: # parent process probably ended sys.exit(1) ret = OK [sig, files_from, files_to] = cPickle.loads(base64.b64decode(txt)) if files_from: # TODO return early when pushing files upstream ret = service.copy_to_cache(sig, files_from, files_to) elif files_to: # the build process waits for workers to (possibly) obtain files from the cache ret = service.copy_from_cache(sig, files_from, files_to) else: ret = "Invalid command" obj = base64.b64encode(cPickle.dumps(ret)) sys.stdout.write(obj.decode()) sys.stdout.write('\n') sys.stdout.flush() if __name__ == '__main__': if CACHE_DIR.startswith('s3://') or CACHE_DIR.startswith('gs://') or CACHE_DIR.startswith('minio://'): if CACHE_DIR.startswith('minio://'): CACHE_DIR = CACHE_DIR[8:] # minio doesn't need the protocol part, uses config aliases service = bucket_cache() elif CACHE_DIR.startswith('http'): service = netcache() else: service = fcache() while 1: try: loop(service) except KeyboardInterrupt: break ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/why.py0000660000000000000000000000354300000000000022101 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) """ This tool modifies the task signature scheme to store and obtain information about the task execution (why it must run, etc):: def configure(conf): conf.load('why') After adding the tool, a full rebuild is necessary: waf clean build --zones=task """ from waflib import Task, Utils, Logs, Errors def signature(self): # compute the result one time, and suppose the scan_signature will give the good result try: return self.cache_sig except AttributeError: pass self.m = Utils.md5() self.m.update(self.hcode) id_sig = self.m.digest() # explicit deps self.m = Utils.md5() self.sig_explicit_deps() exp_sig = self.m.digest() # env vars self.m = Utils.md5() self.sig_vars() var_sig = self.m.digest() # implicit deps / scanner results self.m = Utils.md5() if self.scan: try: self.sig_implicit_deps() except Errors.TaskRescan: return self.signature() impl_sig = self.m.digest() ret = self.cache_sig = impl_sig + id_sig + exp_sig + var_sig return ret Task.Task.signature = signature old = Task.Task.runnable_status def runnable_status(self): ret = old(self) if ret == Task.RUN_ME: try: old_sigs = self.generator.bld.task_sigs[self.uid()] except (KeyError, AttributeError): Logs.debug("task: task must run as no previous signature exists") else: new_sigs = self.cache_sig def v(x): return Utils.to_hex(x) Logs.debug('Task %r', self) msgs = ['* Implicit or scanner dependency', '* Task code', '* Source file, explicit or manual dependency', '* Configuration data variable'] tmp = 'task: -> %s: %s %s' for x in range(len(msgs)): l = len(Utils.SIG_NIL) a = new_sigs[x*l : (x+1)*l] b = old_sigs[x*l : (x+1)*l] if (a != b): Logs.debug(tmp, msgs[x].ljust(35), v(a), v(b)) return ret Task.Task.runnable_status = runnable_status ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/win32_opts.py0000660000000000000000000001114400000000000023275 0ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Windows-specific optimizations This module can help reducing the overhead of listing files on windows (more than 10000 files). Python 3.5 already provides the listdir optimization though. 
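Like the other extras, the tool presumably just needs to be loaded from the wscript so that the
patches below are applied, e.g. (sketch)::

    def options(opt):
        opt.load('win32_opts')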
""" import os from waflib import Utils, Build, Node, Logs try: TP = '%s\\*'.decode('ascii') except AttributeError: TP = '%s\\*' if Utils.is_win32: from waflib.Tools import md5_tstamp import ctypes, ctypes.wintypes FindFirstFile = ctypes.windll.kernel32.FindFirstFileW FindNextFile = ctypes.windll.kernel32.FindNextFileW FindClose = ctypes.windll.kernel32.FindClose FILE_ATTRIBUTE_DIRECTORY = 0x10 INVALID_HANDLE_VALUE = -1 UPPER_FOLDERS = ('.', '..') try: UPPER_FOLDERS = [unicode(x) for x in UPPER_FOLDERS] except NameError: pass def cached_hash_file(self): try: cache = self.ctx.cache_listdir_cache_hash_file except AttributeError: cache = self.ctx.cache_listdir_cache_hash_file = {} if id(self.parent) in cache: try: t = cache[id(self.parent)][self.name] except KeyError: raise IOError('Not a file') else: # an opportunity to list the files and the timestamps at once findData = ctypes.wintypes.WIN32_FIND_DATAW() find = FindFirstFile(TP % self.parent.abspath(), ctypes.byref(findData)) if find == INVALID_HANDLE_VALUE: cache[id(self.parent)] = {} raise IOError('Not a file') cache[id(self.parent)] = lst_files = {} try: while True: if findData.cFileName not in UPPER_FOLDERS: thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY if not thatsadir: ts = findData.ftLastWriteTime d = (ts.dwLowDateTime << 32) | ts.dwHighDateTime lst_files[str(findData.cFileName)] = d if not FindNextFile(find, ctypes.byref(findData)): break except Exception: cache[id(self.parent)] = {} raise IOError('Not a file') finally: FindClose(find) t = lst_files[self.name] fname = self.abspath() if fname in Build.hashes_md5_tstamp: if Build.hashes_md5_tstamp[fname][0] == t: return Build.hashes_md5_tstamp[fname][1] try: fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT) except OSError: raise IOError('Cannot read from %r' % fname) f = os.fdopen(fd, 'rb') m = Utils.md5() rb = 1 try: while rb: rb = f.read(200000) m.update(rb) finally: f.close() # ensure that the cache is overwritten Build.hashes_md5_tstamp[fname] = (t, m.digest()) return m.digest() Node.Node.cached_hash_file = cached_hash_file def get_bld_sig_win32(self): try: return self.ctx.hash_cache[id(self)] except KeyError: pass except AttributeError: self.ctx.hash_cache = {} self.ctx.hash_cache[id(self)] = ret = Utils.h_file(self.abspath()) return ret Node.Node.get_bld_sig = get_bld_sig_win32 def isfile_cached(self): # optimize for nt.stat calls, assuming there are many files for few folders try: cache = self.__class__.cache_isfile_cache except AttributeError: cache = self.__class__.cache_isfile_cache = {} try: c1 = cache[id(self.parent)] except KeyError: c1 = cache[id(self.parent)] = [] curpath = self.parent.abspath() findData = ctypes.wintypes.WIN32_FIND_DATAW() find = FindFirstFile(TP % curpath, ctypes.byref(findData)) if find == INVALID_HANDLE_VALUE: Logs.error("invalid win32 handle isfile_cached %r", self.abspath()) return os.path.isfile(self.abspath()) try: while True: if findData.cFileName not in UPPER_FOLDERS: thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY if not thatsadir: c1.append(str(findData.cFileName)) if not FindNextFile(find, ctypes.byref(findData)): break except Exception as e: Logs.error('exception while listing a folder %r %r', self.abspath(), e) return os.path.isfile(self.abspath()) finally: FindClose(find) return self.name in c1 Node.Node.isfile_cached = isfile_cached def find_or_declare_win32(self, lst): # assuming that "find_or_declare" is called before the build starts, remove the calls to os.path.isfile if 
isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if node: if not node.isfile_cached(): try: node.parent.mkdir() except OSError: pass return node self = self.get_src() node = self.find_node(lst) if node: if not node.isfile_cached(): try: node.parent.mkdir() except OSError: pass return node node = self.get_bld().make_node(lst) node.parent.mkdir() return node Node.Node.find_or_declare = find_or_declare_win32 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1579615789.0292094 tevent-0.11.0/third_party/waf/waflib/extras/wix.py0000660000000000000000000000514300000000000022077 0ustar00rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # vim: tabstop=4 noexpandtab """ Windows Installer XML Tool (WiX) .wxs --- candle ---> .wxobj --- light ---> .msi bld(features='wix', some.wxs, gen='some.msi', candleflags=[..], lightflags=[..]) bld(features='wix', source=['bundle.wxs','WixBalExtension'], gen='setup.exe', candleflags=[..]) """ import os, copy from waflib import TaskGen from waflib import Task from waflib.Utils import winreg class candle(Task.Task): run_str = '${CANDLE} -nologo ${CANDLEFLAGS} -out ${TGT} ${SRC[0].abspath()}', class light(Task.Task): run_str = "${LIGHT} -nologo -b ${SRC[0].parent.abspath()} ${LIGHTFLAGS} -out ${TGT} ${SRC[0].abspath()}" @TaskGen.feature('wix') @TaskGen.before_method('process_source') def wix(self): #X.wxs -> ${SRC} for CANDLE #X.wxobj -> ${SRC} for LIGHT #X.dll -> -ext X in ${LIGHTFLAGS} #X.wxl -> wixui.wixlib -loc X.wxl in ${LIGHTFLAGS} wxobj = [] wxs = [] exts = [] wxl = [] rest = [] for x in self.source: if x.endswith('.wxobj'): wxobj.append(x) elif x.endswith('.wxs'): wxobj.append(self.path.find_or_declare(x[:-4]+'.wxobj')) wxs.append(x) elif x.endswith('.dll'): exts.append(x[:-4]) elif '.' not in x: exts.append(x) elif x.endswith('.wxl'): wxl.append(x) else: rest.append(x) self.source = self.to_nodes(rest) #.wxs cndl = self.create_task('candle', self.to_nodes(wxs), self.to_nodes(wxobj)) lght = self.create_task('light', self.to_nodes(wxobj), self.path.find_or_declare(self.gen)) cndl.env.CANDLEFLAGS = copy.copy(getattr(self,'candleflags',[])) lght.env.LIGHTFLAGS = copy.copy(getattr(self,'lightflags',[])) for x in wxl: lght.env.append_value('LIGHTFLAGS','wixui.wixlib') lght.env.append_value('LIGHTFLAGS','-loc') lght.env.append_value('LIGHTFLAGS',x) for x in exts: cndl.env.append_value('CANDLEFLAGS','-ext') cndl.env.append_value('CANDLEFLAGS',x) lght.env.append_value('LIGHTFLAGS','-ext') lght.env.append_value('LIGHTFLAGS',x) #wix_bin_path() def wix_bin_path(): basekey = r"SOFTWARE\Microsoft\.NETFramework\AssemblyFolders" query = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, basekey) cnt=winreg.QueryInfoKey(query)[0] thiskey = r'C:\Program Files (x86)\WiX Toolset v3.10\SDK' for i in range(cnt-1,-1,-1): thiskey = winreg.EnumKey(query,i) if 'WiX' in thiskey: break winreg.CloseKey(query) return os.path.normpath(winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, basekey+r'\\'+thiskey)+'..\\bin') def configure(ctx): path_list=[wix_bin_path()] ctx.find_program('candle', var='CANDLE', mandatory=True, path_list = path_list) ctx.find_program('light', var='LIGHT', mandatory=True, path_list = path_list) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1625730159.2996032 tevent-0.11.0/third_party/waf/waflib/extras/xcode6.py0000660000000000000000000005722700000000000022472 0ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # XCode 3/XCode 4/XCode 6/Xcode 7 generator for Waf # Based on work by Nicolas Mercier 2011 # Extended by Simon Warg 2015, https://github.com/mimon # XCode project file format based on http://www.monobjc.net/xcode-project-file-format.html """ See playground/xcode6/ for usage examples. """ from waflib import Context, TaskGen, Build, Utils, Errors, Logs import os, sys # FIXME too few extensions XCODE_EXTS = ['.c', '.cpp', '.m', '.mm'] HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' MAP_EXT = { '': "folder", '.h' : "sourcecode.c.h", '.hh': "sourcecode.cpp.h", '.inl': "sourcecode.cpp.h", '.hpp': "sourcecode.cpp.h", '.c': "sourcecode.c.c", '.m': "sourcecode.c.objc", '.mm': "sourcecode.cpp.objcpp", '.cc': "sourcecode.cpp.cpp", '.cpp': "sourcecode.cpp.cpp", '.C': "sourcecode.cpp.cpp", '.cxx': "sourcecode.cpp.cpp", '.c++': "sourcecode.cpp.cpp", '.l': "sourcecode.lex", # luthor '.ll': "sourcecode.lex", '.y': "sourcecode.yacc", '.yy': "sourcecode.yacc", '.plist': "text.plist.xml", ".nib": "wrapper.nib", ".xib": "text.xib", } # Used in PBXNativeTarget elements PRODUCT_TYPE_APPLICATION = 'com.apple.product-type.application' PRODUCT_TYPE_FRAMEWORK = 'com.apple.product-type.framework' PRODUCT_TYPE_EXECUTABLE = 'com.apple.product-type.tool' PRODUCT_TYPE_LIB_STATIC = 'com.apple.product-type.library.static' PRODUCT_TYPE_LIB_DYNAMIC = 'com.apple.product-type.library.dynamic' PRODUCT_TYPE_EXTENSION = 'com.apple.product-type.kernel-extension' PRODUCT_TYPE_IOKIT = 'com.apple.product-type.kernel-extension.iokit' # Used in PBXFileReference elements FILE_TYPE_APPLICATION = 'wrapper.cfbundle' FILE_TYPE_FRAMEWORK = 'wrapper.framework' FILE_TYPE_LIB_DYNAMIC = 'compiled.mach-o.dylib' FILE_TYPE_LIB_STATIC = 'archive.ar' FILE_TYPE_EXECUTABLE = 'compiled.mach-o.executable' # Tuple packs of the above TARGET_TYPE_FRAMEWORK = (PRODUCT_TYPE_FRAMEWORK, FILE_TYPE_FRAMEWORK, '.framework') TARGET_TYPE_APPLICATION = (PRODUCT_TYPE_APPLICATION, FILE_TYPE_APPLICATION, '.app') TARGET_TYPE_DYNAMIC_LIB = (PRODUCT_TYPE_LIB_DYNAMIC, FILE_TYPE_LIB_DYNAMIC, '.dylib') TARGET_TYPE_STATIC_LIB = (PRODUCT_TYPE_LIB_STATIC, FILE_TYPE_LIB_STATIC, '.a') TARGET_TYPE_EXECUTABLE = (PRODUCT_TYPE_EXECUTABLE, FILE_TYPE_EXECUTABLE, '') # Maps target type string to its data TARGET_TYPES = { 'framework': TARGET_TYPE_FRAMEWORK, 'app': TARGET_TYPE_APPLICATION, 'dylib': TARGET_TYPE_DYNAMIC_LIB, 'stlib': TARGET_TYPE_STATIC_LIB, 'exe' :TARGET_TYPE_EXECUTABLE, } def delete_invalid_values(dct): """ Deletes entries that are dictionaries or sets """ for k, v in list(dct.items()): if isinstance(v, dict) or isinstance(v, set): del dct[k] return dct """ Configuration of the global project settings. Sets an environment variable 'PROJ_CONFIGURATION' which is a dictionary of configuration name and buildsettings pair. E.g.: env.PROJ_CONFIGURATION = { 'Debug': { 'ARCHS': 'x86', ... } 'Release': { 'ARCHS': x86_64' ... } } The user can define a completely customized dictionary in configure() stage. Otherwise a default Debug/Release will be created based on env variable """ def configure(self): if not self.env.PROJ_CONFIGURATION: self.to_log("A default project configuration was created since no custom one was given in the configure(conf) stage. Define your custom project settings by adding PROJ_CONFIGURATION to env. 
The env.PROJ_CONFIGURATION must be a dictionary with at least one key, where each key is the configuration name, and the value is a dictionary of key/value settings.\n") # Check for any added config files added by the tool 'c_config'. if 'cfg_files' in self.env: self.env.INCLUDES = Utils.to_list(self.env.INCLUDES) + [os.path.abspath(os.path.dirname(f)) for f in self.env.cfg_files] # Create default project configuration? if 'PROJ_CONFIGURATION' not in self.env: defaults = delete_invalid_values(self.env.get_merged_dict()) self.env.PROJ_CONFIGURATION = { "Debug": defaults, "Release": defaults, } # Some build settings are required to be present by XCode. We will supply default values # if user hasn't defined any. defaults_required = [('PRODUCT_NAME', '$(TARGET_NAME)')] for cfgname,settings in self.env.PROJ_CONFIGURATION.items(): for default_var, default_val in defaults_required: if default_var not in settings: settings[default_var] = default_val # Error check customization if not isinstance(self.env.PROJ_CONFIGURATION, dict): raise Errors.ConfigurationError("The env.PROJ_CONFIGURATION must be a dictionary with at least one key, where each key is the configuration name, and the value is a dictionary of key/value settings.") part1 = 0 part2 = 10000 part3 = 0 id = 562000999 def newid(): global id id += 1 return "%04X%04X%04X%012d" % (0, 10000, 0, id) """ Represents a tree node in the XCode project plist file format. When written to a file, all attributes of XCodeNode are stringified together with its value. However, attributes starting with an underscore _ are ignored during that process and allows you to store arbitrary values that are not supposed to be written out. """ class XCodeNode(object): def __init__(self): self._id = newid() self._been_written = False def tostring(self, value): if isinstance(value, dict): result = "{\n" for k,v in value.items(): result = result + "\t\t\t%s = %s;\n" % (k, self.tostring(v)) result = result + "\t\t}" return result elif isinstance(value, str): return '"%s"' % value.replace('"', '\\\\\\"') elif isinstance(value, list): result = "(\n" for i in value: result = result + "\t\t\t\t%s,\n" % self.tostring(i) result = result + "\t\t\t)" return result elif isinstance(value, XCodeNode): return value._id else: return str(value) def write_recursive(self, value, file): if isinstance(value, dict): for k,v in value.items(): self.write_recursive(v, file) elif isinstance(value, list): for i in value: self.write_recursive(i, file) elif isinstance(value, XCodeNode): value.write(file) def write(self, file): if not self._been_written: self._been_written = True for attribute,value in self.__dict__.items(): if attribute[0] != '_': self.write_recursive(value, file) w = file.write w("\t%s = {\n" % self._id) w("\t\tisa = %s;\n" % self.__class__.__name__) for attribute,value in self.__dict__.items(): if attribute[0] != '_': w("\t\t%s = %s;\n" % (attribute, self.tostring(value))) w("\t};\n\n") # Configurations class XCBuildConfiguration(XCodeNode): def __init__(self, name, settings = {}, env=None): XCodeNode.__init__(self) self.baseConfigurationReference = "" self.buildSettings = settings self.name = name if env and env.ARCH: settings['ARCHS'] = " ".join(env.ARCH) class XCConfigurationList(XCodeNode): def __init__(self, configlst): """ :param configlst: list of XCConfigurationList """ XCodeNode.__init__(self) self.buildConfigurations = configlst self.defaultConfigurationIsVisible = 0 self.defaultConfigurationName = configlst and configlst[0].name or "" # Group/Files class 
PBXFileReference(XCodeNode): def __init__(self, name, path, filetype = '', sourcetree = "SOURCE_ROOT"): XCodeNode.__init__(self) self.fileEncoding = 4 if not filetype: _, ext = os.path.splitext(name) filetype = MAP_EXT.get(ext, 'text') self.lastKnownFileType = filetype self.explicitFileType = filetype self.name = name self.path = path self.sourceTree = sourcetree def __hash__(self): return (self.path+self.name).__hash__() def __eq__(self, other): return (self.path, self.name) == (other.path, other.name) class PBXBuildFile(XCodeNode): """ This element indicate a file reference that is used in a PBXBuildPhase (either as an include or resource). """ def __init__(self, fileRef, settings={}): XCodeNode.__init__(self) # fileRef is a reference to a PBXFileReference object self.fileRef = fileRef # A map of key/value pairs for additional settings. self.settings = settings def __hash__(self): return (self.fileRef).__hash__() def __eq__(self, other): return self.fileRef == other.fileRef class PBXGroup(XCodeNode): def __init__(self, name, sourcetree = 'SOURCE_TREE'): XCodeNode.__init__(self) self.children = [] self.name = name self.sourceTree = sourcetree # Maintain a lookup table for all PBXFileReferences # that are contained in this group. self._filerefs = {} def add(self, sources): """ Add a list of PBXFileReferences to this group :param sources: list of PBXFileReferences objects """ self._filerefs.update(dict(zip(sources, sources))) self.children.extend(sources) def get_sub_groups(self): """ Returns all child PBXGroup objects contained in this group """ return list(filter(lambda x: isinstance(x, PBXGroup), self.children)) def find_fileref(self, fileref): """ Recursively search this group for an existing PBXFileReference. Returns None if none were found. The reason you'd want to reuse existing PBXFileReferences from a PBXGroup is that XCode doesn't like PBXFileReferences that aren't part of a PBXGroup hierarchy. If it isn't, the consequence is that certain UI features like 'Reveal in Finder' stops working. """ if fileref in self._filerefs: return self._filerefs[fileref] elif self.children: for childgroup in self.get_sub_groups(): f = childgroup.find_fileref(fileref) if f: return f return None class PBXContainerItemProxy(XCodeNode): """ This is the element for to decorate a target item. """ def __init__(self, containerPortal, remoteGlobalIDString, remoteInfo='', proxyType=1): XCodeNode.__init__(self) self.containerPortal = containerPortal # PBXProject self.remoteGlobalIDString = remoteGlobalIDString # PBXNativeTarget self.remoteInfo = remoteInfo # Target name self.proxyType = proxyType class PBXTargetDependency(XCodeNode): """ This is the element for referencing other target through content proxies. """ def __init__(self, native_target, proxy): XCodeNode.__init__(self) self.target = native_target self.targetProxy = proxy class PBXFrameworksBuildPhase(XCodeNode): """ This is the element for the framework link build phase, i.e. 
linking to frameworks """ def __init__(self, pbxbuildfiles): XCodeNode.__init__(self) self.buildActionMask = 2147483647 self.runOnlyForDeploymentPostprocessing = 0 self.files = pbxbuildfiles #List of PBXBuildFile (.o, .framework, .dylib) class PBXHeadersBuildPhase(XCodeNode): """ This is the element for adding header files to be packaged into the .framework """ def __init__(self, pbxbuildfiles): XCodeNode.__init__(self) self.buildActionMask = 2147483647 self.runOnlyForDeploymentPostprocessing = 0 self.files = pbxbuildfiles #List of PBXBuildFile (.o, .framework, .dylib) class PBXCopyFilesBuildPhase(XCodeNode): """ Represents the PBXCopyFilesBuildPhase section. PBXBuildFile can be added to this node to copy files after build is done. """ def __init__(self, pbxbuildfiles, dstpath, dstSubpathSpec=0, *args, **kwargs): XCodeNode.__init__(self) self.files = pbxbuildfiles self.dstPath = dstpath self.dstSubfolderSpec = dstSubpathSpec class PBXSourcesBuildPhase(XCodeNode): """ Represents the 'Compile Sources' build phase in a Xcode target """ def __init__(self, buildfiles): XCodeNode.__init__(self) self.files = buildfiles # List of PBXBuildFile objects class PBXLegacyTarget(XCodeNode): def __init__(self, action, target=''): XCodeNode.__init__(self) self.buildConfigurationList = XCConfigurationList([XCBuildConfiguration('waf', {})]) if not target: self.buildArgumentsString = "%s %s" % (sys.argv[0], action) else: self.buildArgumentsString = "%s %s --targets=%s" % (sys.argv[0], action, target) self.buildPhases = [] self.buildToolPath = sys.executable self.buildWorkingDirectory = "" self.dependencies = [] self.name = target or action self.productName = target or action self.passBuildSettingsInEnvironment = 0 class PBXShellScriptBuildPhase(XCodeNode): def __init__(self, action, target): XCodeNode.__init__(self) self.buildActionMask = 2147483647 self.files = [] self.inputPaths = [] self.outputPaths = [] self.runOnlyForDeploymentPostProcessing = 0 self.shellPath = "/bin/sh" self.shellScript = "%s %s %s --targets=%s" % (sys.executable, sys.argv[0], action, target) class PBXNativeTarget(XCodeNode): """ Represents a target in XCode, e.g. App, DyLib, Framework etc. """ def __init__(self, target, node, target_type=TARGET_TYPE_APPLICATION, configlist=[], buildphases=[]): XCodeNode.__init__(self) product_type = target_type[0] file_type = target_type[1] self.buildConfigurationList = XCConfigurationList(configlist) self.buildPhases = buildphases self.buildRules = [] self.dependencies = [] self.name = target self.productName = target self.productType = product_type # See TARGET_TYPE_ tuples constants self.productReference = PBXFileReference(node.name, node.abspath(), file_type, '') def add_configuration(self, cf): """ :type cf: XCBuildConfiguration """ self.buildConfigurationList.buildConfigurations.append(cf) def add_build_phase(self, phase): # Some build phase types may appear only once. If a phase type already exists, then merge them. if ( (phase.__class__ == PBXFrameworksBuildPhase) or (phase.__class__ == PBXSourcesBuildPhase) ): for b in self.buildPhases: if b.__class__ == phase.__class__: b.files.extend(phase.files) return self.buildPhases.append(phase) def add_dependency(self, depnd): self.dependencies.append(depnd) # Root project object class PBXProject(XCodeNode): def __init__(self, name, version, env): XCodeNode.__init__(self) if not isinstance(env.PROJ_CONFIGURATION, dict): raise Errors.WafError("Error: env.PROJ_CONFIGURATION must be a dictionary. This is done for you if you do not define one yourself. 
However, did you load the xcode module at the end of your wscript configure() ?") # Retrieve project configuration configurations = [] for config_name, settings in env.PROJ_CONFIGURATION.items(): cf = XCBuildConfiguration(config_name, settings) configurations.append(cf) self.buildConfigurationList = XCConfigurationList(configurations) self.compatibilityVersion = version[0] self.hasScannedForEncodings = 1 self.mainGroup = PBXGroup(name) self.projectRoot = "" self.projectDirPath = "" self.targets = [] self._objectVersion = version[1] def create_target_dependency(self, target, name): """ : param target : PXBNativeTarget """ proxy = PBXContainerItemProxy(self, target, name) dependency = PBXTargetDependency(target, proxy) return dependency def write(self, file): # Make sure this is written only once if self._been_written: return w = file.write w("// !$*UTF8*$!\n") w("{\n") w("\tarchiveVersion = 1;\n") w("\tclasses = {\n") w("\t};\n") w("\tobjectVersion = %d;\n" % self._objectVersion) w("\tobjects = {\n\n") XCodeNode.write(self, file) w("\t};\n") w("\trootObject = %s;\n" % self._id) w("}\n") def add_target(self, target): self.targets.append(target) def get_target(self, name): """ Get a reference to PBXNativeTarget if it exists """ for t in self.targets: if t.name == name: return t return None @TaskGen.feature('c', 'cxx') @TaskGen.after('propagate_uselib_vars', 'apply_incpaths') def process_xcode(self): bld = self.bld try: p = bld.project except AttributeError: return if not hasattr(self, 'target_type'): return products_group = bld.products_group target_group = PBXGroup(self.name) p.mainGroup.children.append(target_group) # Determine what type to build - framework, app bundle etc. target_type = getattr(self, 'target_type', 'app') if target_type not in TARGET_TYPES: raise Errors.WafError("Target type '%s' does not exists. Available options are '%s'. In target '%s'" % (target_type, "', '".join(TARGET_TYPES.keys()), self.name)) else: target_type = TARGET_TYPES[target_type] file_ext = target_type[2] # Create the output node target_node = self.path.find_or_declare(self.name+file_ext) target = PBXNativeTarget(self.name, target_node, target_type, [], []) products_group.children.append(target.productReference) # Pull source files from the 'source' attribute and assign them to a UI group. # Use a default UI group named 'Source' unless the user # provides a 'group_files' dictionary to customize the UI grouping. sources = getattr(self, 'source', []) if hasattr(self, 'group_files'): group_files = getattr(self, 'group_files', []) for grpname,files in group_files.items(): group = bld.create_group(grpname, files) target_group.children.append(group) else: group = bld.create_group('Source', sources) target_group.children.append(group) # Create a PBXFileReference for each source file. # If the source file already exists as a PBXFileReference in any of the UI groups, then # reuse that PBXFileReference object (XCode does not like it if we don't reuse) for idx, path in enumerate(sources): fileref = PBXFileReference(path.name, path.abspath()) existing_fileref = target_group.find_fileref(fileref) if existing_fileref: sources[idx] = existing_fileref else: sources[idx] = fileref # If the 'source' attribute contains any file extension that XCode can't work with, # then remove it. The allowed file extensions are defined in XCODE_EXTS. 
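# (illustrative note, not in the original tool) XCODE_EXTS is deliberately short -- see the
# "FIXME too few extensions" remark near the top of this file; a wscript could widen it before
# generating the project, e.g.:
#     from waflib.extras import xcode6   # import path assumed from this file's location
#     xcode6.XCODE_EXTS.append('.cc')    # hypothetical extra extension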
@TaskGen.feature('c', 'cxx')
@TaskGen.after('propagate_uselib_vars', 'apply_incpaths')
def process_xcode(self):
	bld = self.bld
	try:
		p = bld.project
	except AttributeError:
		return

	if not hasattr(self, 'target_type'):
		return

	products_group = bld.products_group

	target_group = PBXGroup(self.name)
	p.mainGroup.children.append(target_group)

	# Determine what type to build - framework, app bundle etc.
	target_type = getattr(self, 'target_type', 'app')
	if target_type not in TARGET_TYPES:
		raise Errors.WafError("Target type '%s' does not exist. Available options are '%s'. In target '%s'" % (target_type, "', '".join(TARGET_TYPES.keys()), self.name))
	else:
		target_type = TARGET_TYPES[target_type]
	file_ext = target_type[2]

	# Create the output node
	target_node = self.path.find_or_declare(self.name+file_ext)
	target = PBXNativeTarget(self.name, target_node, target_type, [], [])
	products_group.children.append(target.productReference)

	# Pull source files from the 'source' attribute and assign them to a UI group.
	# Use a default UI group named 'Source' unless the user
	# provides a 'group_files' dictionary to customize the UI grouping.
	sources = getattr(self, 'source', [])
	if hasattr(self, 'group_files'):
		group_files = getattr(self, 'group_files', [])
		for grpname, files in group_files.items():
			group = bld.create_group(grpname, files)
			target_group.children.append(group)
	else:
		group = bld.create_group('Source', sources)
		target_group.children.append(group)

	# Create a PBXFileReference for each source file.
	# If the source file already exists as a PBXFileReference in any of the UI groups, then
	# reuse that PBXFileReference object (XCode does not like it if we don't reuse)
	for idx, path in enumerate(sources):
		fileref = PBXFileReference(path.name, path.abspath())
		existing_fileref = target_group.find_fileref(fileref)
		if existing_fileref:
			sources[idx] = existing_fileref
		else:
			sources[idx] = fileref

	# If the 'source' attribute contains any file extension that XCode can't work with,
	# then remove it. The allowed file extensions are defined in XCODE_EXTS.
	is_valid_file_extension = lambda file: os.path.splitext(file.path)[1] in XCODE_EXTS
	sources = list(filter(is_valid_file_extension, sources))

	buildfiles = [bld.unique_buildfile(PBXBuildFile(x)) for x in sources]
	target.add_build_phase(PBXSourcesBuildPhase(buildfiles))

	# Check if any framework to link against is some other target we've made
	libs = getattr(self, 'tmp_use_seen', [])
	for lib in libs:
		use_target = p.get_target(lib)
		if use_target:
			# Create an XCode dependency so that XCode knows to build the other target before this target
			dependency = p.create_target_dependency(use_target, use_target.name)
			target.add_dependency(dependency)
			buildphase = PBXFrameworksBuildPhase([PBXBuildFile(use_target.productReference)])
			target.add_build_phase(buildphase)
			if lib in self.env.LIB:
				self.env.LIB = list(filter(lambda x: x != lib, self.env.LIB))

	# If 'export_headers' is present, add files to the Headers build phase in xcode.
	# These are files that'll get packed into the Framework for instance.
	exp_hdrs = getattr(self, 'export_headers', [])
	hdrs = bld.as_nodes(Utils.to_list(exp_hdrs))
	files = [p.mainGroup.find_fileref(PBXFileReference(n.name, n.abspath())) for n in hdrs]
	files = [PBXBuildFile(f, {'ATTRIBUTES': ('Public',)}) for f in files]
	buildphase = PBXHeadersBuildPhase(files)
	target.add_build_phase(buildphase)

	# Merge frameworks and libs into one list, and prefix the frameworks
	frameworks = Utils.to_list(self.env.FRAMEWORK)
	frameworks = ' '.join(['-framework %s' % (f.split('.framework')[0]) for f in frameworks])

	libs = Utils.to_list(self.env.STLIB) + Utils.to_list(self.env.LIB)
	libs = ' '.join(bld.env['STLIB_ST'] % t for t in libs)

	# Override target specific build settings
	bldsettings = {
		'HEADER_SEARCH_PATHS': ['$(inherited)'] + self.env['INCPATHS'],
		'LIBRARY_SEARCH_PATHS': ['$(inherited)'] + Utils.to_list(self.env.LIBPATH) + Utils.to_list(self.env.STLIBPATH) + Utils.to_list(self.env.LIBDIR),
		'FRAMEWORK_SEARCH_PATHS': ['$(inherited)'] + Utils.to_list(self.env.FRAMEWORKPATH),
		'OTHER_LDFLAGS': libs + ' ' + frameworks + ' ' + ' '.join(bld.env['LINKFLAGS']),
		'OTHER_CPLUSPLUSFLAGS': Utils.to_list(self.env['CXXFLAGS']),
		'OTHER_CFLAGS': Utils.to_list(self.env['CFLAGS']),
		'INSTALL_PATH': [],
		'GCC_PREPROCESSOR_DEFINITIONS': self.env['DEFINES']
	}

	# Install path
	installpaths = Utils.to_list(getattr(self, 'install', []))
	prodbuildfile = PBXBuildFile(target.productReference)
	for instpath in installpaths:
		bldsettings['INSTALL_PATH'].append(instpath)
		target.add_build_phase(PBXCopyFilesBuildPhase([prodbuildfile], instpath))

	if not bldsettings['INSTALL_PATH']:
		del bldsettings['INSTALL_PATH']

	# Create build settings which can override the project settings. Defaults to none if the user
	# did not pass the argument. This will be filled up with target specific
	# search paths, libs to link etc.
	settings = getattr(self, 'settings', {})

	# The keys represent the different build configurations, e.g. Debug, Release and so on.
	# Insert our generated build settings into all configuration names.
	keys = set(settings.keys()) | set(bld.env.PROJ_CONFIGURATION.keys())
	for k in keys:
		if k in settings:
			settings[k].update(bldsettings)
		else:
			settings[k] = bldsettings

	for k, v in settings.items():
		target.add_configuration(XCBuildConfiguration(k, v))

	p.add_target(target)
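# --- Illustrative sketch (not part of the original waf module) ----------------
# What process_xcode() consumes, phrased as a hypothetical wscript build()
# function. The attribute names (group_files, export_headers, install,
# settings) are the ones read above and bld.framework() is one of the wrappers
# registered at the bottom of this file; the file names and setting values are
# invented for the example, and this helper is never called by waf.
def _sketch_wscript_build(bld):
	bld.framework(
		source         = bld.path.ant_glob('src/*.cpp'),
		target         = 'MyLib',
		group_files    = {'Core': 'src/core.cpp', 'Headers': 'src/core.h'},
		export_headers = 'src/core.h',
		install        = '@executable_path/../Frameworks',
		settings       = {'Debug': {'ENABLE_TESTABILITY': 'YES'}},
	)
# -------------------------------------------------------------------------------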
class xcode(Build.BuildContext):
	cmd = 'xcode6'
	fun = 'build'

	def as_nodes(self, files):
		""" Returns a list of waflib.Node objects from a list of file path strings """
		nodes = []
		for x in files:
			if not isinstance(x, str):
				d = x
			else:
				d = self.srcnode.find_node(x)
				if not d:
					raise Errors.WafError('File \'%s\' was not found' % x)
			nodes.append(d)
		return nodes

	def create_group(self, name, files):
		"""
		Returns a new PBXGroup containing the files (paths) passed in the files argument
		:type files: string
		"""
		group = PBXGroup(name)
		"""
		Do not use a unique file reference here, since XCode seems to allow only one
		file reference to be referenced by a group.
		"""
		files_ = []
		for d in self.as_nodes(Utils.to_list(files)):
			fileref = PBXFileReference(d.name, d.abspath())
			files_.append(fileref)
		group.add(files_)
		return group

	def unique_buildfile(self, buildfile):
		"""
		Returns a unique buildfile, possibly an existing one.
		Use this after you've constructed a PBXBuildFile to make sure there is
		only one PBXBuildFile for the same file in the same project.
		"""
		try:
			build_files = self.build_files
		except AttributeError:
			build_files = self.build_files = {}

		if buildfile not in build_files:
			build_files[buildfile] = buildfile
		return build_files[buildfile]

	def execute(self):
		"""
		Entry point
		"""
		self.restore()
		if not self.all_envs:
			self.load_envs()
		self.recurse([self.run_dir])

		appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath()))

		p = PBXProject(appname, ('Xcode 3.2', 46), self.env)

		# If we don't create a Products group, then
		# XCode will create one, which entails that
		# we'll start to see duplicate files in the UI
		# for some reason.
		products_group = PBXGroup('Products')
		p.mainGroup.children.append(products_group)

		self.project = p
		self.products_group = products_group

		# post all task generators
		# the process_xcode method above will be called for each target
		if self.targets and self.targets != '*':
			(self._min_grp, self._exact_tg) = self.get_targets()

		self.current_group = 0
		while self.current_group < len(self.groups):
			self.post_group()
			self.current_group += 1

		node = self.bldnode.make_node('%s.xcodeproj' % appname)
		node.mkdir()
		node = node.make_node('project.pbxproj')
		with open(node.abspath(), 'w') as f:
			p.write(f)
		Logs.pprint('GREEN', 'Wrote %r' % node.abspath())
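# --- Illustrative sketch (not part of the original waf module) ----------------
# How the xcode6 command is typically driven from a project's wscript. The
# configure body below is an assumption about common usage rather than the
# tool's documented API: loading the tool last lets it install default
# PROJ_CONFIGURATION values (compare the error raised by PBXProject.__init__
# above), which the user may then override; the setting values are invented.
def _sketch_wscript_configure(conf):
	conf.load('compiler_cxx')
	conf.load('xcode6')
	# Optional override of the per-configuration settings merged by process_xcode()
	conf.env.PROJ_CONFIGURATION = {
		'Debug':   {'GCC_OPTIMIZATION_LEVEL': '0'},
		'Release': {'GCC_OPTIMIZATION_LEVEL': 's'},
	}
# The project is then generated with:  ./waf configure xcode6
# -------------------------------------------------------------------------------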
def bind_fun(tgtype):
	def fun(self, *k, **kw):
		tgtype = fun.__name__
		if tgtype == 'shlib' or tgtype == 'dylib':
			features = 'cxx cxxshlib'
			tgtype = 'dylib'
		elif tgtype == 'framework':
			features = 'cxx cxxshlib'
			tgtype = 'framework'
		elif tgtype == 'program':
			features = 'cxx cxxprogram'
			tgtype = 'exe'
		elif tgtype == 'app':
			features = 'cxx cxxprogram'
			tgtype = 'app'
		elif tgtype == 'stlib':
			features = 'cxx cxxstlib'
			tgtype = 'stlib'
		lst = kw['features'] = Utils.to_list(kw.get('features', []))
		for x in features.split():
			if not x in kw['features']:
				lst.append(x)

		kw['target_type'] = tgtype
		return self(*k, **kw)
	fun.__name__ = tgtype
	setattr(Build.BuildContext, tgtype, fun)
	return fun

for xx in 'app framework dylib shlib stlib program'.split():
	bind_fun(xx)

tevent-0.11.0/third_party/waf/waflib/fixpy2.py
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010-2018 (ita)

from __future__ import with_statement

import os

all_modifs = {}

def fixdir(dir):
	"""Call all substitution functions on Waf folders"""
	for k in all_modifs:
		for v in all_modifs[k]:
			modif(os.path.join(dir, 'waflib'), k, v)

def modif(dir, name, fun):
	"""Call a substitution function"""
	if name == '*':
		lst = []
		for y in '. Tools extras'.split():
			for x in os.listdir(os.path.join(dir, y)):
				if x.endswith('.py'):
					lst.append(y + os.sep + x)
		for x in lst:
			modif(dir, x, fun)
		return

	filename = os.path.join(dir, name)
	with open(filename, 'r') as f:
		txt = f.read()

	txt = fun(txt)

	with open(filename, 'w') as f:
		f.write(txt)

def subst(*k):
	"""Register a substitution function"""
	def do_subst(fun):
		for x in k:
			try:
				all_modifs[x].append(fun)
			except KeyError:
				all_modifs[x] = [fun]
		return fun
	return do_subst

@subst('*')
def r1(code):
	"utf-8 fixes for python < 2.6"
	code = code.replace('as e:', ',e:')
	code = code.replace(".decode(sys.stdout.encoding or'latin-1',errors='replace')", '')
	return code.replace('.encode()', '')

@subst('Runner.py')
def r4(code):
	"generator syntax"
	return code.replace('next(self.biter)', 'self.biter.next()')

@subst('Context.py')
def r5(code):
	return code.replace("('Execution failure: %s'%str(e),ex=e)", "('Execution failure: %s'%str(e),ex=e),None,sys.exc_info()[2]")
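# --- Illustrative sketch (not part of this file) -------------------------------
# Shape of a substitution function driven by the registry above. It is left
# undecorated on purpose so that importing this module registers nothing extra;
# to activate it you would write @subst('Node.py') (file name chosen only as an
# example) and then call fixdir() on an unpacked waf directory.
def _example_substitution(code):
	# Receives the full text of a waflib source file, returns the rewritten text.
	return code.replace('as e:', ',e:')
# -------------------------------------------------------------------------------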
tevent-0.11.0/third_party/waf/waflib/processor.py
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2016-2018 (ita)

import os, sys, traceback, base64, signal
try:
	import cPickle
except ImportError:
	import pickle as cPickle

try:
	import subprocess32 as subprocess
except ImportError:
	import subprocess

try:
	TimeoutExpired = subprocess.TimeoutExpired
except AttributeError:
	class TimeoutExpired(Exception):
		pass

def run():
	txt = sys.stdin.readline().strip()
	if not txt:
		# parent process probably ended
		sys.exit(1)
	[cmd, kwargs, cargs] = cPickle.loads(base64.b64decode(txt))
	cargs = cargs or {}

	if not 'close_fds' in kwargs:
		# workers have no fds
		kwargs['close_fds'] = False

	ret = 1
	out, err, ex, trace = (None, None, None, None)
	try:
		proc = subprocess.Popen(cmd, **kwargs)
		try:
			out, err = proc.communicate(**cargs)
		except TimeoutExpired:
			if kwargs.get('start_new_session') and hasattr(os, 'killpg'):
				os.killpg(proc.pid, signal.SIGKILL)
			else:
				proc.kill()
			out, err = proc.communicate()
			exc = TimeoutExpired(proc.args, timeout=cargs['timeout'], output=out)
			exc.stderr = err
			raise exc
		ret = proc.returncode
	except Exception as e:
		exc_type, exc_value, tb = sys.exc_info()
		exc_lines = traceback.format_exception(exc_type, exc_value, tb)
		trace = str(cmd) + '\n' + ''.join(exc_lines)
		ex = e.__class__.__name__

	# it is just text so maybe we do not need to pickle()
	tmp = [ret, out, err, ex, trace]
	obj = base64.b64encode(cPickle.dumps(tmp))
	sys.stdout.write(obj.decode())
	sys.stdout.write('\n')
	sys.stdout.flush()

while 1:
	try:
		run()
	except KeyboardInterrupt:
		break
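# --- Illustrative sketch (not part of this file) -------------------------------
# Documentation of the line protocol spoken by the worker loop above; in waf the
# parent side lives in waflib/Runner.py. Placed after the blocking loop, so it
# never runs here. The helper name and the Popen handle it expects are invented:
# roughly, a parent pickles [cmd, kwargs, cargs], base64-encodes it, writes one
# line to the worker's stdin and reads one base64 line back that decodes to
# [ret, out, err, ex, trace].
def _sketch_parent_roundtrip(worker, cmd):
	# worker: a subprocess.Popen of this script with stdin=PIPE, stdout=PIPE
	payload = base64.b64encode(cPickle.dumps([cmd, {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}, {}]))
	worker.stdin.write(payload + b'\n')
	worker.stdin.flush()
	reply = base64.b64decode(worker.stdout.readline())
	ret, out, err, ex, trace = cPickle.loads(reply)
	return ret, out, err
# -------------------------------------------------------------------------------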